# Mirror of https://github.com/hiyouga/LLaMA-Factory.git
# Synced 2025-12-27 17:20:35 +08:00

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "llamafactory"
dynamic = ["version"]
description = "Unified Efficient Fine-Tuning of 100+ LLMs"
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.9.0"
authors = [
    { name = "hiyouga", email = "hiyouga@buaa.edu.cn" },
]
keywords = [
    "AI",
    "LLM",
    "GPT",
    "ChatGPT",
    "Llama",
    "Transformer",
    "DeepSeek",
    "Pytorch",
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
dependencies = [
    # core deps
    "torch>=2.4.0",
    "torchvision>=0.19.0",
    "transformers>=4.49.0,<=4.56.2,!=4.52.0; python_version < '3.10'",
    "transformers>=4.49.0,<=4.57.1,!=4.52.0,!=4.57.0; python_version >= '3.10'",
    "datasets>=2.16.0,<=4.0.0",
    "accelerate>=1.3.0,<=1.11.0",
    "peft>=0.14.0,<=0.17.1",
    "trl>=0.8.6,<=0.9.6",
    "torchdata>=0.10.0,<=0.11.0",
    # gui
    "gradio>=4.38.0,<=6.2.0",
    "matplotlib>=3.7.0",
    "tyro<0.9.0",
    # ops
    "einops",
    "numpy",
    "pandas",
    "scipy",
    # model and tokenizer
    "sentencepiece",
    "tiktoken",
    "modelscope",
    "hf-transfer",
    "safetensors",
    # python
    "fire",
    "omegaconf",
    "packaging",
    "protobuf",
    "pyyaml",
    "pydantic",
    # api
    "uvicorn",
    "fastapi",
    "sse-starlette",
    # media
    "av",
    "librosa",
]

[project.optional-dependencies]
dev = ["pre-commit", "ruff", "pytest", "build"]
metrics = ["nltk", "jieba", "rouge-chinese"]
deepspeed = ["deepspeed>=0.10.0,<=0.16.9"]

# Both entry points invoke the same CLI; `lmf` is a short alias.
[project.scripts]
llamafactory-cli = "llamafactory.cli:main"
lmf = "llamafactory.cli:main"

[project.urls]
Homepage = "https://github.com/hiyouga/LLaMA-Factory"
Repository = "https://github.com/hiyouga/LLaMA-Factory"

[tool.hatch.build.targets.wheel]
packages = ["src/llamafactory"]

# Version is read from the VERSION constant in env.py (regex source).
[tool.hatch.version]
path = "src/llamafactory/extras/env.py"
# Literal string avoids double-escaping the quotes; value is unchanged.
pattern = 'VERSION = "(?P<version>[^"]+)"'

[tool.ruff]
target-version = "py39"
line-length = 119
indent-width = 4

[tool.ruff.lint]
ignore = [
    "C408",  # collection
    "C901",  # complex
    "E501",  # line too long
    "E731",  # lambda function
    "E741",  # ambiguous var name
    "D100",  # no doc public module
    "D101",  # no doc public class
    "D102",  # no doc public method
    "D103",  # no doc public function
    "D104",  # no doc public package
    "D105",  # no doc magic method
    "D107",  # no doc __init__
]
extend-select = [
    "C",  # complexity
    "E",  # error
    "F",  # pyflakes
    "I",  # isort
    "W",  # warning
    "UP",  # pyupgrade
    "D",  # pydocstyle
    "PT009",  # pytest assert
    "RUF022",  # sort __all__
]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["llamafactory"]
known-third-party = [
    "accelerate",
    "datasets",
    "gradio",
    "numpy",
    "peft",
    "torch",
    "transformers",
    "trl",
]

[tool.ruff.lint.pydocstyle]
convention = "google"

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
docstring-code-format = true
skip-magic-trailing-comma = false
line-ending = "auto"