Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-01 03:02:51 +08:00)
[doc] add no build isolation (#8103)
Commit ed2f89efaf, parent 16e26236eb.
````diff
@@ -483,13 +483,13 @@ huggingface-cli login
 ```bash
 git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
 cd LLaMA-Factory
-pip install -e ".[torch,metrics]"
+pip install -e ".[torch,metrics]" --no-build-isolation
 ```
 
 Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, quality
 
 > [!TIP]
-> Use `pip install --no-deps -e .` to resolve package conflicts.
+> Use `pip install -e . --no-deps --no-build-isolation` to resolve package conflicts.
 
 <details><summary>Setting up a virtual environment with <b>uv</b></summary>
 
````
````diff
@@ -470,13 +470,13 @@ huggingface-cli login
 ```bash
 git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
 cd LLaMA-Factory
-pip install -e ".[torch,metrics]"
+pip install -e ".[torch,metrics]" --no-build-isolation
 ```
 
 Optional extra dependencies: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, quality
 
 > [!TIP]
-> Use `pip install --no-deps -e .` to resolve package conflicts.
+> Use `pip install -e . --no-deps --no-build-isolation` to resolve package conflicts.
 
 <details><summary>Setting up a virtual environment with <b>uv</b></summary>
 
````
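Both install snippets now pass `--no-build-isolation`, which tells pip to build the editable package (and any extras that compile native extensions) in the current environment rather than in a temporary isolated one, so build-time requirements must already be installed there. A rough, hypothetical pre-flight check along those lines (not part of LLaMA-Factory; the package list is an assumption):

```python
# Hypothetical pre-flight check: with --no-build-isolation, pip reuses the
# current environment at build time, so packages such as setuptools/wheel/torch
# must already be importable there. The exact list below is an assumption.
import importlib.util

required = ("setuptools", "wheel", "torch")
missing = [pkg for pkg in required if importlib.util.find_spec(pkg) is None]
if missing:
    print("Install these before running pip with --no-build-isolation:", ", ".join(missing))
else:
    print("Build-time dependencies look available.")
```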
Binary file not shown. Before size: 164 KiB, after size: 167 KiB.
Binary file not shown. Before size: 169 KiB, after size: 166 KiB.
```diff
@@ -17,8 +17,8 @@ import json
 from typing import Optional
 
 import fire
-from transformers import Seq2SeqTrainingArguments
 from tqdm import tqdm
+from transformers import Seq2SeqTrainingArguments
 
 from llamafactory.data import get_dataset, get_template_and_fix_tokenizer
 from llamafactory.extras.constants import IGNORE_INDEX
```
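Read back-to-back, this hunk simply moves the `transformers` import below `tqdm` so the third-party imports stay alphabetized. For readability, the resulting import block looks like this (reassembled from the hunk; the stdlib / third-party / first-party grouping is inferred from the change, not stated in it):

```python
import json  # from the hunk header context
from typing import Optional

import fire
from tqdm import tqdm
from transformers import Seq2SeqTrainingArguments

from llamafactory.data import get_dataset, get_template_and_fix_tokenizer
from llamafactory.extras.constants import IGNORE_INDEX
```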
```diff
@@ -63,8 +63,8 @@ if is_transformers_version_greater_than("4.49.0"):
     except ImportError:
         try:
             # If that fails, try importing from the new location
-            from transformers.video_utils import make_batched_videos
             from transformers.image_utils import make_flat_list_of_images
+            from transformers.video_utils import make_batched_videos
         except ImportError:
             raise ImportError(
                 "Could not import make_batched_videos and make_flat_list_of_images. "
```
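The same kind of reordering happens inside a version-dependent import fallback. A sketch of the surrounding pattern is below; the outer `try` and its import location are assumptions inferred from the context line and the comment, since only the inner fallback appears in the hunk:

```python
# Sketch only: the outer try's import location is an assumption; the hunk
# shows just the inner fallback that imports from the newer module paths.
try:
    from transformers.image_utils import make_batched_videos, make_flat_list_of_images
except ImportError:
    try:
        # If that fails, try importing from the new location
        from transformers.image_utils import make_flat_list_of_images
        from transformers.video_utils import make_batched_videos
    except ImportError:
        raise ImportError(
            "Could not import make_batched_videos and make_flat_list_of_images. "
        )
```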
```diff
@@ -84,14 +84,15 @@ def run_pt(
                 perplexity = math.exp(metrics[f"eval_{key}_loss"])
             except OverflowError:
                 perplexity = float("inf")
 
             metrics[f"eval_{key}_perplexity"] = perplexity
     else:
         try:
             perplexity = math.exp(metrics["eval_loss"])
         except OverflowError:
             perplexity = float("inf")
-            metrics["eval_perplexity"] = perplexity
+
+        metrics["eval_perplexity"] = perplexity
 
     trainer.log_metrics("eval", metrics)
     trainer.save_metrics("eval", metrics)
```
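The last hunk appears to move `metrics["eval_perplexity"] = perplexity` out of the `except OverflowError:` block (indentation is lost in this view), where it would only have run when `math.exp` overflowed. A minimal standalone sketch of the corrected pattern, with an illustrative function name and dict layout rather than LLaMA-Factory's actual API:

```python
import math


def add_perplexity(metrics: dict, key: str = "eval_loss") -> None:
    """Attach perplexity = exp(loss) to a metrics dict, guarding against overflow."""
    try:
        perplexity = math.exp(metrics[key])
    except OverflowError:
        perplexity = float("inf")

    # The assignment belongs after the try/except; inside the except block it
    # would only execute when exp() overflows.
    metrics["eval_perplexity"] = perplexity


metrics = {"eval_loss": 2.3}
add_perplexity(metrics)
print(metrics["eval_perplexity"])  # ~9.97
```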