[breaking] migrate from setuptools to uv (#9673)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: hiyouga <16256802+hiyouga@users.noreply.github.com>
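At a glance, the breaking part for users is the install path: the long extras list computed by setup.py is gone, and most features now come from plain requirement files. A minimal before/after sketch, drawn from the README and workflow changes below (liger-kernel is just one example file):

```bash
# Before this commit (extras resolved by setup.py):
pip install -e ".[torch,metrics]" --no-build-isolation

# After (static pyproject.toml: torch is a core dependency, only
# metrics/deepspeed survive as extras, everything else is a requirements file):
pip install -e ".[metrics]" --no-build-isolation
pip install -r examples/requirements/liger-kernel.txt
```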
8  .github/workflows/docker.yml  (vendored)

@@ -7,7 +7,7 @@ on:
       - "main"
     paths:
       - "**/*.py"
-      - "requirements.txt"
+      - "pyproject.toml"
       - "docker/**"
       - ".github/workflows/*.yml"
   pull_request:
@@ -15,7 +15,7 @@ on:
       - "main"
     paths:
       - "**/*.py"
-      - "requirements.txt"
+      - "pyproject.toml"
       - "docker/**"
       - ".github/workflows/*.yml"
   release:
@@ -64,7 +64,7 @@ jobs:
         id: version
         run: |
           if [ "${{ github.event_name }}" = "release" ]; then
-            echo "tag=$(python setup.py --version)" >> "$GITHUB_OUTPUT"
+            echo "tag=$(grep -oP 'VERSION = "\K[^"]+' src/llamafactory/extras/env.py)" >> "$GITHUB_OUTPUT"
           else
             echo "tag=latest" >> "$GITHUB_OUTPUT"
           fi
@@ -93,8 +93,6 @@ jobs:
         with:
           context: .
           file: ./docker/docker-cuda/Dockerfile
-          build-args: |
-            EXTRAS=metrics,deepspeed,liger-kernel
           push: ${{ github.event_name != 'pull_request' }}
           tags: |
             docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}
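The release tag can no longer be asked of setup.py, so the workflow scrapes the version constant out of the source instead. A quick sketch of what that grep does, assuming env.py declares the version as a plain string (the value shown is illustrative):

```bash
# -o prints only the match; \K discards everything matched so far,
# so only the quoted value is emitted.
$ grep -oP 'VERSION = "\K[^"]+' src/llamafactory/extras/env.py
0.9.4
```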
15  .github/workflows/tests.yml  (vendored)

@@ -7,7 +7,7 @@ on:
       - "main"
     paths:
       - "**/*.py"
-      - "requirements.txt"
+      - "pyproject.toml"
       - "Makefile"
       - ".github/workflows/*.yml"
   pull_request:
@@ -15,7 +15,7 @@ on:
       - "main"
     paths:
       - "**/*.py"
-      - "requirements.txt"
+      - "pyproject.toml"
       - "Makefile"
       - ".github/workflows/*.yml"

@@ -68,16 +68,19 @@ jobs:
         with:
           python-version: ${{ matrix.python }}

+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
-          python -m pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu
-          python -m pip install ".[dev]"
+          uv pip install --system torch torchvision --index-url https://download.pytorch.org/whl/cpu
+          uv pip install --system -e "."
+          uv pip install --system -r examples/requirements/dev.txt

       - name: Install transformers
         if: ${{ matrix.transformers }}
         run: |
-          python -m pip install "transformers==${{ matrix.transformers }}"
+          uv pip install --system "transformers==${{ matrix.transformers }}"

       - name: Cache files
         id: hf-hub-cache
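`uv pip install --system` targets the runner's interpreter directly, which suits throwaway CI machines. For a local checkout, the equivalent — sketched under the assumption you want a project-local environment rather than `--system` — would be:

```bash
uv venv                                  # create .venv for the project
uv pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu
uv pip install -e "."                    # editable install, as in CI
uv pip install -r examples/requirements/dev.txt
```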
12  .github/workflows/tests_npu.yml  (vendored)

@@ -7,7 +7,7 @@ on:
       - "main"
     paths:
       - "**/*.py"
-      - "requirements.txt"
+      - "pyproject.toml"
       - "Makefile"
       - ".github/workflows/*.yml"
   pull_request:
@@ -15,7 +15,7 @@ on:
       - "main"
     paths:
       - "**/*.py"
-      - "requirements.txt"
+      - "pyproject.toml"
       - "Makefile"
       - ".github/workflows/*.yml"

@@ -48,10 +48,14 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4

+      - name: Install uv
+        run: |
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
-          python -m pip install ".[torch-npu,dev]" torch-npu==${{matrix.pytorch_npu}}
+          uv pip install --system -e "." torch-npu==${{matrix.pytorch_npu}}
+          uv pip install --system -r examples/requirements/dev.txt

       - name: Install node
         run: |
MANIFEST.in

@@ -1 +1 @@
-include LICENSE requirements.txt
+include LICENSE
20  Makefile

@@ -1,24 +1,24 @@
 .PHONY: build commit license quality style test

-check_dirs := scripts src tests tests_v1 setup.py
+check_dirs := scripts src tests tests_v1

 build:
-	pip3 install build && python3 -m build
+	uv build

 commit:
-	pre-commit install
-	pre-commit run --all-files
+	uv run pre-commit install
+	uv run pre-commit run --all-files

 license:
-	python3 tests/check_license.py $(check_dirs)
+	uv run python tests/check_license.py $(check_dirs)

 quality:
-	ruff check $(check_dirs)
-	ruff format --check $(check_dirs)
+	uv run ruff check $(check_dirs)
+	uv run ruff format --check $(check_dirs)

 style:
-	ruff check $(check_dirs) --fix
-	ruff format $(check_dirs)
+	uv run ruff check $(check_dirs) --fix
+	uv run ruff format $(check_dirs)

 test:
-	WANDB_DISABLED=true pytest -vv --import-mode=importlib tests/ tests_v1/
+	WANDB_DISABLED=true uv run pytest -vv --import-mode=importlib tests/ tests_v1/
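Every recipe now goes through `uv run`, which executes the command inside the project environment (creating or syncing it on demand), so contributors no longer need ruff, pytest, or pre-commit preinstalled on their PATH. For example, the `quality` and `test` targets above expand to:

```bash
# check_dirs expanded by hand; uv resolves the tools from the project env
uv run ruff check scripts src tests tests_v1
WANDB_DISABLED=true uv run pytest -vv --import-mode=importlib tests/ tests_v1/
```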
11  README.md

@@ -514,10 +514,12 @@ huggingface-cli login
 ```bash
 git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
 cd LLaMA-Factory
-pip install -e ".[torch,metrics]" --no-build-isolation
+pip install -e "." --no-build-isolation
 ```

-Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, openmind, swanlab, dev
+Optional dependencies available: `metrics`, `deepspeed`. Install with: `pip install -e ".[metrics,deepspeed]"`
+
+Additional dependencies for specific features are available in `examples/requirements/`.

 #### Install from Docker Image
@@ -579,7 +581,7 @@ To enable FlashAttention-2 on the Windows platform, please use the script from [
 <details><summary>For Ascend NPU users</summary>

-To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher and specify extra dependencies: `pip install -e ".[torch-npu,metrics]"`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
+To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher: `pip install -e "."`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:

 ```bash
 # replace the url according to your CANN version and devices
@@ -714,7 +716,6 @@ For CUDA users:
 ```bash
 docker build -f ./docker/docker-cuda/Dockerfile \
     --build-arg PIP_INDEX=https://pypi.org/simple \
-    --build-arg EXTRAS=metrics \
     -t llamafactory:latest .

 docker run -dit --ipc=host --gpus=all \
@@ -731,7 +732,6 @@ For Ascend NPU users:
 ```bash
 docker build -f ./docker/docker-npu/Dockerfile \
     --build-arg PIP_INDEX=https://pypi.org/simple \
-    --build-arg EXTRAS=torch-npu,metrics \
     -t llamafactory:latest .

 docker run -dit --ipc=host \
@@ -756,7 +756,6 @@ For AMD ROCm users:
 ```bash
 docker build -f ./docker/docker-rocm/Dockerfile \
     --build-arg PIP_INDEX=https://pypi.org/simple \
-    --build-arg EXTRAS=metrics \
     -t llamafactory:latest .

 docker run -dit --ipc=host \
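For any feature that used to be an extra, the new pattern is one requirement file per feature; a sketch using vllm (any of the files added below works the same way):

```bash
pip install -e "." --no-build-isolation
pip install -r examples/requirements/vllm.txt   # formerly: pip install -e ".[vllm]"
```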
docker/docker-cuda/Dockerfile

@@ -4,7 +4,6 @@ FROM ${BASE_IMAGE}

 # Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
-ARG EXTRAS=metrics
 ARG INSTALL_FLASHATTN=false
 ARG HTTP_PROXY=""

@@ -27,17 +26,13 @@ WORKDIR /app
 # Change pip source
 RUN pip config set global.index-url "${PIP_INDEX}" && \
     pip config set global.extra-index-url "${PIP_INDEX}" && \
-    pip install --no-cache-dir --upgrade pip packaging wheel setuptools
+    pip install --no-cache-dir --upgrade pip packaging wheel setuptools "hatchling>=1.18.0" editables

-# Install the requirements
-COPY requirements.txt /app
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the rest of the application into the image
+# Copy the application into the image
 COPY . /app

 # Install LLaMA Factory
-RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
+RUN pip install --no-cache-dir -e "." --no-build-isolation

 # Rebuild flash attention
 RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
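With the `EXTRAS` build-arg gone, every image gets the lean core install. If you want the two remaining extras baked in, one option — a sketch, not an official image variant — is a derived build:

```bash
docker build -f ./docker/docker-cuda/Dockerfile -t llamafactory:latest .
# WORKDIR is /app and the source was already copied there, so "." resolves:
printf 'FROM llamafactory:latest\nRUN pip install --no-cache-dir ".[metrics,deepspeed]"\n' \
  | docker build -t llamafactory:extras -f - .
```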
(file header missing)

@@ -8,7 +8,7 @@ ENV PYPI_MIRROR=https://mirrors.aliyun.com/pypi/simple/
 ENV PYPI_TRUSTED_HOST=mirrors.aliyun.com
 ENV APT_MIRROR=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/

-RUN pip install --upgrade pip setuptools wheel --trusted-host ${PYPI_TRUSTED_HOST} --index-url ${PYPI_MIRROR}
+RUN pip install --upgrade pip setuptools wheel "hatchling>=1.18.0" editables --trusted-host ${PYPI_TRUSTED_HOST} --index-url ${PYPI_MIRROR}

 RUN pip uninstall -y torch torchvision torch-tensorrt \
     flash_attn transformer-engine \
@@ -56,14 +56,14 @@ ENV JAVA_HOME /usr/lib/jvm/java-21-openjdk-amd64
 # pip install LLaMA-Factory
 WORKDIR /app

-COPY requirements.txt /app/
-RUN pip install --no-cache-dir -r requirements.txt
+# Copy the application into the image
+COPY . /app
+
+# Install LLaMA Factory
+RUN pip install --no-cache-dir -e "." --no-build-isolation

 RUN pip install "git+https://github.com/alibaba/roll.git#subdirectory=mcore_adapter"

-COPY . /app/
-RUN pip install -e ".[metrics]" --no-build-isolation
-
 # Expose port 7860 for LLaMA Board
 ENV GRADIO_SERVER_PORT=7860
 EXPOSE 7860
docker/docker-npu/Dockerfile

@@ -5,7 +5,6 @@ FROM ${BASE_IMAGE}

 # Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
-ARG EXTRAS=torch-npu,metrics
 ARG HTTP_PROXY=""
 ARG PYTORCH_INDEX=https://download.pytorch.org/whl/cpu

@@ -28,21 +27,17 @@ WORKDIR /app
 # Change pip source
 RUN pip config set global.index-url "${PIP_INDEX}" && \
     pip config set global.extra-index-url "${PIP_INDEX}" && \
-    pip install --no-cache-dir --upgrade pip packaging wheel setuptools
+    pip install --no-cache-dir --upgrade pip packaging wheel setuptools "hatchling>=1.18.0" editables

 # Install torch-npu
 RUN pip uninstall -y torch torchvision torchaudio && \
     pip install --no-cache-dir "torch==2.7.1" "torch-npu==2.7.1" "torchvision==0.22.1" --index-url "${PYTORCH_INDEX}"

-# Install the requirements
-COPY requirements.txt /app
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the rest of the application into the image
+# Copy the application into the image
 COPY . /app

 # Install LLaMA Factory
-RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
+RUN pip install --no-cache-dir -e "." --no-build-isolation

 # Set up volumes
 # VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
docker/docker-rocm/Dockerfile

@@ -4,7 +4,6 @@ FROM ${BASE_IMAGE}

 # Installation arguments
 ARG PIP_INDEX=https://pypi.org/simple
-ARG EXTRAS=metrics
 ARG INSTALL_FLASHATTN=false
 ARG HTTP_PROXY=""
 ARG PYTORCH_INDEX=https://download.pytorch.org/whl/rocm6.3

@@ -28,21 +27,17 @@ WORKDIR /app
 # Change pip source
 RUN pip config set global.index-url "${PIP_INDEX}" && \
     pip config set global.extra-index-url "${PIP_INDEX}" && \
-    pip install --no-cache-dir --upgrade pip packaging wheel setuptools
+    pip install --no-cache-dir --upgrade pip packaging wheel setuptools "hatchling>=1.18.0" editables

 # Reinstall pytorch rocm
 RUN pip uninstall -y torch torchvision torchaudio && \
     pip install --no-cache-dir --pre torch torchvision torchaudio --index-url "${PYTORCH_INDEX}"

-# Install the requirements
-COPY requirements.txt /app
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the rest of the application into the image
+# Copy the application into the image
 COPY . /app

 # Install LLaMA Factory
-RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
+RUN pip install --no-cache-dir -e "." --no-build-isolation

 # Rebuild flash attention
 RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
1  examples/requirements/adam-mini.txt  (new file)
@@ -0,0 +1 @@
+adam-mini

1  examples/requirements/apollo.txt  (new file)
@@ -0,0 +1 @@
+apollo-torch

1  examples/requirements/aqlm.txt  (new file)
@@ -0,0 +1 @@
+aqlm[gpu]>=1.1.0

1  examples/requirements/badam.txt  (new file)
@@ -0,0 +1 @@
+badam>=1.2.1

1  examples/requirements/bitsandbytes.txt  (new file)
@@ -0,0 +1 @@
+bitsandbytes>=0.39.0

4  examples/requirements/dev.txt  (new file)
@@ -0,0 +1,4 @@
+pre-commit
+ruff
+pytest
+build

1  examples/requirements/eetq.txt  (new file)
@@ -0,0 +1 @@
+eetq

2  examples/requirements/fp8-te.txt  (new file)
@@ -0,0 +1,2 @@
+transformer_engine[pytorch]>=2.0.0
+accelerate>=1.10.0

2  examples/requirements/fp8.txt  (new file)
@@ -0,0 +1,2 @@
+torchao>=0.8.0
+accelerate>=1.10.0

1  examples/requirements/galore.txt  (new file)
@@ -0,0 +1 @@
+galore-torch

2  examples/requirements/gptq.txt  (new file)
@@ -0,0 +1,2 @@
+optimum>=1.24.0
+gptqmodel>=2.0.0

1  examples/requirements/hqq.txt  (new file)
@@ -0,0 +1 @@
+hqq

1  examples/requirements/liger-kernel.txt  (new file)
@@ -0,0 +1 @@
+liger-kernel>=0.5.5

8  examples/requirements/minicpm-v.txt  (new file)
@@ -0,0 +1,8 @@
+soundfile
+torchvision
+torchaudio
+vector_quantize_pytorch
+vocos
+msgpack
+referencing
+jsonschema_specifications

1  examples/requirements/openmind.txt  (new file)
@@ -0,0 +1 @@
+openmind

2  examples/requirements/sglang.txt  (new file)
@@ -0,0 +1,2 @@
+sglang[srt]>=0.4.5
+transformers==4.51.1

1  examples/requirements/swanlab.txt  (new file)
@@ -0,0 +1 @@
+swanlab

1  examples/requirements/vllm.txt  (new file)
@@ -0,0 +1 @@
+vllm>=0.4.3,<=0.11.0
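These files compose freely, since pip (and `uv pip`) accept multiple `-r` flags; a sketch combining two of them:

```bash
pip install -r examples/requirements/galore.txt -r examples/requirements/liger-kernel.txt
```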
128  pyproject.toml

@@ -1,22 +1,104 @@
 [build-system]
-requires = ["setuptools>=61.0"]
-build-backend = "setuptools.build_meta"
+requires = ["hatchling"]
+build-backend = "hatchling.build"

 [project]
 name = "llamafactory"
+dynamic = ["version"]
+description = "Unified Efficient Fine-Tuning of 100+ LLMs"
+readme = "README.md"
+license = "Apache-2.0"
 requires-python = ">=3.9.0"
-dynamic = [
-    "version",
-    "dependencies",
-    "optional-dependencies",
-    "scripts",
-    "authors",
-    "description",
-    "readme",
-    "license",
-    "keywords",
-    "classifiers"
+authors = [
+    { name = "hiyouga", email = "hiyouga@buaa.edu.cn" }
 ]
+keywords = [
+    "AI",
+    "LLM",
+    "GPT",
+    "ChatGPT",
+    "Llama",
+    "Transformer",
+    "DeepSeek",
+    "Pytorch"
+]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Education",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence"
+]
+dependencies = [
+    # core deps
+    "transformers>=4.49.0,<=4.56.2,!=4.52.0; python_version < '3.10'",
+    "transformers>=4.49.0,<=4.57.1,!=4.52.0,!=4.57.0; python_version >= '3.10'",
+    "datasets>=2.16.0,<=4.0.0",
+    "accelerate>=1.3.0,<=1.11.0",
+    "peft>=0.14.0,<=0.17.1",
+    "trl>=0.8.6,<=0.9.6",
+    "torchdata",
+    # torch
+    "torch>=2.0.0",
+    "torchvision>=0.15.0",
+    # gui
+    "gradio>=4.38.0,<=5.45.0",
+    "matplotlib>=3.7.0",
+    "tyro<0.9.0",
+    # ops
+    "einops",
+    "numpy<2.0.0",
+    "pandas>=2.0.0",
+    "scipy",
+    # model and tokenizer
+    "sentencepiece",
+    "tiktoken",
+    "modelscope>=1.14.0",
+    "hf-transfer",
+    "safetensors<=0.5.3",
+    # python
+    "fire",
+    "omegaconf",
+    "packaging",
+    "protobuf",
+    "pyyaml",
+    "pydantic<=2.10.6",
+    # api
+    "uvicorn",
+    "fastapi",
+    "sse-starlette",
+    # media
+    "av",
+    "librosa",
+    # yanked
+    "propcache!=0.4.0"
+]
+
+[project.optional-dependencies]
+metrics = ["nltk", "jieba", "rouge-chinese"]
+deepspeed = ["deepspeed>=0.10.0,<=0.16.9"]
+
+[project.scripts]
+llamafactory-cli = "llamafactory.cli:main"
+lmf = "llamafactory.cli:main"
+
+[project.urls]
+Homepage = "https://github.com/hiyouga/LLaMA-Factory"
+Repository = "https://github.com/hiyouga/LLaMA-Factory"
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/llamafactory"]
+
+[tool.hatch.version]
+path = "src/llamafactory/extras/env.py"
+pattern = "VERSION = \"(?P<version>[^\"]+)\""

 [tool.ruff]
 target-version = "py39"
@@ -73,23 +155,3 @@ indent-style = "space"
 docstring-code-format = true
 skip-magic-trailing-comma = false
 line-ending = "auto"
-
-[tool.uv]
-conflicts = [
-    [
-        { extra = "torch-npu" },
-        { extra = "aqlm" },
-    ],
-    [
-        { extra = "torch-npu" },
-        { extra = "vllm" },
-    ],
-    [
-        { extra = "torch-npu" },
-        { extra = "sglang" },
-    ],
-    [
-        { extra = "vllm" },
-        { extra = "sglang" },
-    ],
-]
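Two details worth noting here: the `[tool.uv]` conflicts table disappears because the mutually exclusive extras (`torch-npu`, `aqlm`, `vllm`, `sglang`) are no longer extras at all, and `[tool.hatch.version]` replaces setup.py's `get_version()` by applying essentially the same regex at build time. A quick sanity check of the version wiring, assuming the usual `VERSION = "..."` line in env.py (value illustrative):

```bash
$ grep 'VERSION = ' src/llamafactory/extras/env.py
VERSION = "0.9.4"
$ uv build   # hatchling reads the constant; emits dist/llamafactory-0.9.4-py3-none-any.whl
```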
requirements.txt  (deleted)

@@ -1,39 +0,0 @@
-# core deps
-transformers>=4.49.0,<=4.56.2,!=4.52.0; python_version < '3.10'
-transformers>=4.49.0,<=4.57.1,!=4.52.0,!=4.57.0; python_version >= '3.10'
-datasets>=2.16.0,<=4.0.0
-accelerate>=1.3.0,<=1.11.0
-peft>=0.14.0,<=0.17.1
-trl>=0.8.6,<=0.9.6
-torchdata
-# gui
-gradio>=4.38.0,<=5.45.0
-matplotlib>=3.7.0
-tyro<0.9.0
-# ops
-einops
-numpy<2.0.0
-pandas>=2.0.0
-scipy
-# model and tokenizer
-sentencepiece
-tiktoken
-modelscope>=1.14.0
-hf-transfer
-safetensors<=0.5.3
-# python
-fire
-omegaconf
-packaging
-protobuf
-pyyaml
-pydantic<=2.10.6
-# api
-uvicorn
-fastapi
-sse-starlette
-# media
-av
-librosa
-# yanked
-propcache!=0.4.0
116  setup.py  (deleted)

@@ -1,116 +0,0 @@
-# Copyright 2025 the LlamaFactory team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-
-from setuptools import find_packages, setup
-
-
-def get_version() -> str:
-    with open(os.path.join("src", "llamafactory", "extras", "env.py"), encoding="utf-8") as f:
-        file_content = f.read()
-        pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION")
-        (version,) = re.findall(pattern, file_content)
-        return version
-
-
-def get_requires() -> list[str]:
-    with open("requirements.txt", encoding="utf-8") as f:
-        file_content = f.read()
-        lines = [line.strip() for line in file_content.strip().split("\n") if not line.startswith("#")]
-        return lines
-
-
-def get_console_scripts() -> list[str]:
-    console_scripts = ["llamafactory-cli = llamafactory.cli:main"]
-    if os.getenv("ENABLE_SHORT_CONSOLE", "1").lower() in ["true", "y", "1"]:
-        console_scripts.append("lmf = llamafactory.cli:main")
-
-    return console_scripts
-
-
-extra_require = {
-    "torch": ["torch>=2.0.0", "torchvision>=0.15.0"],
-    "torch-npu": ["torch==2.7.1", "torch-npu==2.7.1", "torchvision==0.22.1", "decorator"],
-    "metrics": ["nltk", "jieba", "rouge-chinese"],
-    "deepspeed": ["deepspeed>=0.10.0,<=0.16.9"],
-    "liger-kernel": ["liger-kernel>=0.5.5"],
-    "bitsandbytes": ["bitsandbytes>=0.39.0"],
-    "hqq": ["hqq"],
-    "eetq": ["eetq"],
-    "gptq": ["optimum>=1.24.0", "gptqmodel>=2.0.0"],
-    "aqlm": ["aqlm[gpu]>=1.1.0"],
-    "vllm": ["vllm>=0.4.3,<=0.11.0"],
-    "sglang": ["sglang[srt]>=0.4.5", "transformers==4.51.1"],
-    "galore": ["galore-torch"],
-    "apollo": ["apollo-torch"],
-    "badam": ["badam>=1.2.1"],
-    "adam-mini": ["adam-mini"],
-    "minicpm_v": [
-        "soundfile",
-        "torchvision",
-        "torchaudio",
-        "vector_quantize_pytorch",
-        "vocos",
-        "msgpack",
-        "referencing",
-        "jsonschema_specifications",
-    ],
-    "openmind": ["openmind"],
-    "swanlab": ["swanlab"],
-    "fp8": ["torchao>=0.8.0", "accelerate>=1.10.0"],
-    "fp8-te": ["transformer_engine[pytorch]>=2.0.0", "accelerate>=1.10.0"],
-    "fp8-all": ["torchao>=0.8.0", "transformer_engine[pytorch]>=2.0.0", "accelerate>=1.10.0"],
-    "dev": ["pre-commit", "ruff", "pytest", "build"],
-}
-
-
-def main():
-    setup(
-        name="llamafactory",
-        version=get_version(),
-        author="hiyouga",
-        author_email="hiyouga@buaa.edu.cn",
-        description="Unified Efficient Fine-Tuning of 100+ LLMs",
-        long_description=open("README.md", encoding="utf-8").read(),
-        long_description_content_type="text/markdown",
-        keywords=["AI", "LLM", "GPT", "ChatGPT", "Llama", "Transformer", "DeepSeek", "Pytorch"],
-        license="Apache 2.0 License",
-        url="https://github.com/hiyouga/LLaMA-Factory",
-        package_dir={"": "src"},
-        packages=find_packages("src"),
-        python_requires=">=3.9.0",
-        install_requires=get_requires(),
-        extras_require=extra_require,
-        entry_points={"console_scripts": get_console_scripts()},
-        classifiers=[
-            "Development Status :: 4 - Beta",
-            "Intended Audience :: Developers",
-            "Intended Audience :: Education",
-            "Intended Audience :: Science/Research",
-            "License :: OSI Approved :: Apache Software License",
-            "Operating System :: OS Independent",
-            "Programming Language :: Python :: 3",
-            "Programming Language :: Python :: 3.9",
-            "Programming Language :: Python :: 3.10",
-            "Programming Language :: Python :: 3.11",
-            "Programming Language :: Python :: 3.12",
-            "Topic :: Scientific/Engineering :: Artificial Intelligence",
-        ],
-    )
-
-
-if __name__ == "__main__":
-    main()
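Nothing from setup.py is lost so much as relocated; a sketch of the mapping (note one behavioral change: the `lmf` alias was previously gated on `ENABLE_SHORT_CONSOLE`, and is now always installed):

```bash
# get_version()          -> [tool.hatch.version] pattern over src/llamafactory/extras/env.py
# get_requires()         -> [project] dependencies (requirements.txt deleted)
# extra_require          -> [project.optional-dependencies] + examples/requirements/*.txt
# get_console_scripts()  -> [project.scripts]; lmf is no longer optional
pip install -e "." --no-build-isolation && llamafactory-cli version
```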