Compare commits

91 commits, comparing `v0.9.3...bcc2c1fd8f`
| SHA1 |
| ---------- |
| bcc2c1fd8f |
| 7dd910f067 |
| d10d65e4ce |
| 1c44b60e3e |
| e2b1594d31 |
| 09dedf144f |
| a04d777d7f |
| 6ffebe5ff7 |
| 0761a4448f |
| abc3b1e1c4 |
| 344c760cc1 |
| 80fe3a172d |
| 800934b507 |
| e2ba32598d |
| 812720909e |
| 260b5625c3 |
| 52488ac974 |
| 610a3f1094 |
| a22dab97fd |
| db223e3975 |
| 7e710c6d3e |
| 185f0556d4 |
| 1c675522fd |
| 6c777375b7 |
| 9c433f6b41 |
| ec41ef08aa |
| 0ab0be9df2 |
| c14a5fefee |
| 1664657d80 |
| 022a326ca4 |
| c1e1f24f5f |
| 2c31279316 |
| 003a2acb1a |
| 1ada15981a |
| 936f4fd78e |
| 41648020db |
| b8272a874b |
| e695fdfa70 |
| 893edb26d0 |
| dc61e78e77 |
| ef507ae8e0 |
| c244b1edb9 |
| 8a5d6c8a74 |
| b523543994 |
| 4dfad24902 |
| c709c0378d |
| b5071f4b2c |
| 4a9ca24122 |
| cb03eb422d |
| a416ab48d8 |
| d64651a637 |
| 7c223c432b |
| 52882d01c3 |
| 4e0bf35eb4 |
| 8efa506c16 |
| d6767f355a |
| c6e2871944 |
| 9d6565d1a8 |
| 1639e4b587 |
| 9c9b307d33 |
| cf13964c4c |
| 542fa97a72 |
| 7f8e5f52f9 |
| 12ed792db9 |
| 4b0ec83928 |
| 1b1ec9bfb6 |
| e0dfdb7dbb |
| 17ab40793b |
| 0686206020 |
| 16f13d304b |
| 57524751e0 |
| 906b31fd47 |
| bede213da7 |
| e9f70daabe |
| cbb65567a9 |
| a5a93597b1 |
| d17a672251 |
| 4f0da0aec9 |
| 2c26ce6ac4 |
| abc6ce6168 |
| 4407231a3b |
| f276b9a963 |
| 48897e5b16 |
| 9cd81aa424 |
| ecbccb4c5d |
| 9af7915f7b |
| 7b252b2368 |
| 88a92be808 |
| c6c764388c |
| 3e392473d1 |
| ad345ec054 |
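The same range can be inspected locally with stock git; a minimal sketch, assuming the repository is cloned and its tags fetched:

```bash
# Fetch tags so v0.9.3 resolves locally.
git fetch origin --tags

# List the 91 commits that the compare view v0.9.3...bcc2c1fd8f spans,
# newest first (GitHub's A...B compare shows commits in B but not in A).
git log --oneline v0.9.3..bcc2c1fd8f
```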
.github/ISSUE_TEMPLATE/config.yml (7 changed lines, vendored)

````diff
@@ -1 +1,8 @@
 blank_issues_enabled: false
+contact_links:
+  - name: 📚 FAQs | 常见问题
+    url: https://github.com/hiyouga/LLaMA-Factory/issues/4614
+    about: Reading in advance is recommended | 建议提前阅读
+  - name: Discussions | 讨论区
+    url: https://github.com/hiyouga/LLaMA-Factory/discussions
+    about: Please ask fine-tuning questions here | 请在这里讨论训练问题
````
.github/workflows/docker.yml (62 changed lines, vendored)

````diff
@@ -21,10 +21,17 @@ on:
 
 jobs:
   build:
+    strategy:
+      fail-fast: false
+      matrix:
+        device:
+          - "cuda"
+          - "npu"
+
     runs-on: ubuntu-latest
 
     concurrency:
-      group: ${{ github.workflow }}-${{ github.ref }}
+      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }}
       cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
     environment:
@@ -33,27 +40,44 @@ jobs:
 
     steps:
       - name: Free up disk space
-        run: |
-          df -h
-          sudo rm -rf /usr/share/dotnet
-          sudo rm -rf /opt/ghc
-          sudo rm -rf /opt/hostedtoolcache
-          df -h
+        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+        with:
+          tool-cache: true
+          docker-images: false
 
       - name: Checkout
         uses: actions/checkout@v4
 
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.9"
+
+      - name: Get llamafactory version
+        id: version
+        run: |
+          echo "tag=$(python setup.py --version | sed 's/\.dev0//')" >> "$GITHUB_OUTPUT"
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
 
       - name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
+        if: ${{ github.event_name != 'pull_request' }}
        uses: docker/login-action@v3
         with:
           username: ${{ vars.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
-      - name: Build and push Docker image
+      - name: Login to Quay
+        if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu' }}
+        uses: docker/login-action@v3
+        with:
+          registry: quay.io
+          username: ${{ vars.QUAY_ASCEND_USERNAME }}
+          password: ${{ secrets.QUAY_ASCEND_TOKEN }}
+
+      - name: Build and push Docker image (CUDA)
+        if: ${{ matrix.device == 'cuda' }}
         uses: docker/build-push-action@v6
         with:
           context: .
@@ -61,6 +85,24 @@
           build-args: |
             EXTRAS=metrics,deepspeed,liger-kernel
           push: ${{ github.event_name != 'pull_request' }}
-          tags: docker.io/hiyouga/llamafactory:latest
+          tags: |
+            docker.io/hiyouga/llamafactory:latest
+            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
+
+      - name: Build and push Docker image (NPU)
+        if: ${{ matrix.device == 'npu' }}
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          file: ./docker/docker-npu/Dockerfile
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: |
+            docker.io/hiyouga/llamafactory:latest-npu-a2
+            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
+            quay.io/ascend/llamafactory:latest-npu-a2
+            quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
````
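The new `Get llamafactory version` step is what feeds the versioned image tags: it strips the `.dev0` suffix that the development branch carries, so pushes get both `latest` and a release-style tag. A minimal sketch of that shell pipeline, assuming the checked-out source reports a version like `0.9.4.dev0` (the exact version string is an assumption):

```bash
# Stand-in for `python setup.py --version`, which prints whatever version
# the checked-out source declares (assumed here to be "0.9.4.dev0").
version="0.9.4.dev0"

# Same sed expression as the workflow step: delete the literal ".dev0".
tag="$(echo "$version" | sed 's/\.dev0//')"

echo "$tag"  # -> 0.9.4, yielding e.g. docker.io/hiyouga/llamafactory:0.9.4
```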
.github/workflows/label_issue.yml (2 changed lines, vendored)

````diff
@@ -19,7 +19,7 @@ jobs:
           ISSUE_TITLE: ${{ github.event.issue.title }}
         run: |
           LABEL=""
-          NPU_KEYWORDS=(npu huawei ascend 华为 昇腾)
+          NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
           ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
           for KEYWORD in ${NPU_KEYWORDS[@]}; do
             if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
````
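For context, the widened array is consumed by a substring loop over the lower-cased issue title; the `input` guard exists because the word "input" itself contains the substring "npu". A self-contained sketch of that logic with a made-up title (the real workflow reads the title from `${{ github.event.issue.title }}` and goes on to apply the label via the GitHub API):

```bash
#!/usr/bin/env bash
ISSUE_TITLE="Ascend 910B loss不下降"  # illustrative title, not a real issue

LABEL=""
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
for KEYWORD in ${NPU_KEYWORDS[@]}; do
  # Titles containing "input" are skipped because "input" matches "npu".
  if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
    LABEL="npu"
    break
  fi
done

echo "label to apply: ${LABEL:-none}"
```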
.github/workflows/tests.yml (15 changed lines, vendored)

````diff
@@ -6,14 +6,14 @@ on:
     branches:
       - "main"
     paths:
-      - "**.py"
+      - "**/*.py"
       - "requirements.txt"
       - ".github/workflows/*.yml"
   pull_request:
     branches:
       - "main"
     paths:
-      - "**.py"
+      - "**/*.py"
       - "requirements.txt"
       - ".github/workflows/*.yml"
 
@@ -34,15 +34,15 @@ jobs:
         transformers:
           - null
         include: # test backward compatibility
-          - python: "3.9"
-            os: "ubuntu-latest"
-            transformers: "4.45.0"
           - python: "3.9"
             os: "ubuntu-latest"
             transformers: "4.49.0"
           - python: "3.9"
             os: "ubuntu-latest"
             transformers: "4.51.0"
+          - python: "3.9"
+            os: "ubuntu-latest"
+            transformers: "4.53.0"
 
     runs-on: ${{ matrix.os }}
 
@@ -75,6 +75,11 @@ jobs:
         run: |
           python -m pip install "transformers==${{ matrix.transformers }}"
 
+      - name: Install transformers to avoid mac os ci errors
+        if: ${{ matrix.os == 'macos-13' }}
+        run: |
+          python -m pip install "transformers<=4.51.3"
+
       - name: Cache files
         id: hf-hub-cache
         uses: actions/cache@v4
````
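The `include` matrix pins specific transformers releases to catch backward-compatibility breaks. One cell of that matrix can be approximated locally along these lines (the virtualenv name and the pytest invocation are assumptions; the pinned version comes straight from the workflow):

```bash
# Reproduce the python 3.9 / transformers 4.53.0 matrix cell locally.
python3.9 -m venv .venv-compat
source .venv-compat/bin/activate
python -m pip install -e ".[torch,dev]"
python -m pip install "transformers==4.53.0"
python -m pytest tests/  # assumed test entry point
```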
.pre-commit-config.yaml (8 changed lines)

````diff
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
     hooks:
       - id: check-ast
       - id: check-added-large-files
@@ -15,13 +15,13 @@ repos:
         args: ['--branch', 'main']
 
   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.17.0
+    rev: v3.20.0
     hooks:
       - id: pyupgrade
-        args: [--py38-plus]
+        args: [--py39-plus]
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.6.9
+    rev: v0.13.2
     hooks:
       - id: ruff
         args: [--fix]
````
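Bumping `rev` values only takes effect once the hooks are re-fetched; the usual local sequence with the stock pre-commit CLI looks like this (nothing here is repository-specific):

```bash
pip install pre-commit
pre-commit install          # register the git pre-commit hook once
pre-commit run --all-files  # exercise the bumped hooks against the whole tree
pre-commit autoupdate       # optionally let pre-commit bump revs itself
```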
README.md (231 changed lines)

````diff
@@ -5,16 +5,16 @@
 [](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
 [](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
 [](https://pypi.org/project/llamafactory/)
-[](https://scholar.google.com/scholar?cites=12620864006390196564)
+[](https://scholar.google.com/scholar?cites=12620864006390196564)
 [](https://hub.docker.com/r/hiyouga/llamafactory/tags)
 
 [](https://twitter.com/llamafactory_ai)
-[](https://discord.gg/rKfvV9r9FK)
-[](https://gitcode.com/zhengyaowei/LLaMA-Factory)
+[](https://discord.gg/rKfvV9r9FK)
 
-[](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
-[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
-[](https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory)
+[](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
+[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
+[](https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory)
+[](https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory)
 [](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
 [](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
 [](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)
@@ -25,13 +25,8 @@
 
 ### Supporters ❤️
 
-<a href="https://warp.dev/llama-factory">
-    <img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
-</a>
-
-#### [Warp, the agentic terminal for developers](https://warp.dev/llama-factory)
-
-[Available for MacOS, Linux, & Windows](https://warp.dev/llama-factory)
+| <div style="text-align: center;"><a href="https://warp.dev/llama-factory"><img alt="Warp sponsorship" width="400" src="assets/sponsors/warp.jpg"></a><br><a href="https://warp.dev/llama-factory" style="font-size:larger;">Warp, the agentic terminal for developers</a><br><a href="https://warp.dev/llama-factory">Available for MacOS, Linux, & Windows</a> | <a href="https://serpapi.com"><img alt="SerpAPI sponsorship" width="250" src="assets/sponsors/serpapi.svg"> </a> |
+| ---- | ---- |
 
 ----
 
@@ -41,7 +36,7 @@
 
 </div>
 
-👋 Join our [WeChat group](assets/wechat.jpg), [NPU user group](assets/wechat_npu.jpg) or [Alaya NeW user group](assets/wechat_alaya.png).
+👋 Join our [WeChat](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/main.jpg), [NPU](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/npu.jpg), [Lab4AI](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/lab4ai.jpg), [LLaMA Factory Online](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/online.jpg) user group.
 
 \[ English | [中文](README_zh.md) \]
 
@@ -51,11 +46,14 @@ https://github.com/user-attachments/assets/3991a3a8-4276-4d30-9cab-4cb0c4b9b99e
 
 Choose your path:
 
-- **Documentation**: https://llamafactory.readthedocs.io/en/latest/
+- **Documentation (WIP)**: https://llamafactory.readthedocs.io/en/latest/
+- **Documentation (AMD GPU)**: https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/notebooks/fine_tune/llama_factory_llama3.html
 - **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
 - **Local machine**: Please refer to [usage](#getting-started)
 - **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
 - **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory
+- **Official Course**: https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory
+- **LLaMA Factory Online**: https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory
 
 > [!NOTE]
 > Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them.
@@ -74,6 +72,7 @@ Choose your path:
   - [Data Preparation](#data-preparation)
   - [Quickstart](#quickstart)
   - [Fine-Tuning with LLaMA Board GUI](#fine-tuning-with-llama-board-gui-powered-by-gradio)
+  - [LLaMA Factory Online](#llama-factory-online)
   - [Build Docker](#build-docker)
   - [Deploy with OpenAI-style API and vLLM](#deploy-with-openai-style-api-and-vllm)
   - [Download from ModelScope Hub](#download-from-modelscope-hub)
@@ -90,7 +89,7 @@ Choose your path:
 - **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, DeepSeek, Yi, Gemma, ChatGLM, Phi, etc.
 - **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
 - **Scalable resources**: 16-bit full-tuning, freeze-tuning, LoRA and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ.
-- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), [Muon](https://github.com/KellerJordan/Muon), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA.
+- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), [Muon](https://github.com/KellerJordan/Muon), [OFT](https://github.com/huggingface/peft/tree/main/src/peft/tuners/oft), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA.
 - **Practical tricks**: [FlashAttention-2](https://github.com/Dao-AILab/flash-attention), [Unsloth](https://github.com/unslothai/unsloth), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), RoPE scaling, NEFTune and rsLoRA.
 - **Wide tasks**: Multi-turn dialogue, tool using, image understanding, visual grounding, video recognition, audio understanding, etc.
 - **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, [SwanLab](https://github.com/SwanHubX/SwanLab), etc.
@@ -98,28 +97,42 @@ Choose your path:
 
 ### Day-N Support for Fine-Tuning Cutting-Edge Models
 
-| Support Date | Model Name |
-| ------------ | ------------------------------------------------------------ |
-| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / InternLM 3 / MiniCPM-o-2.6 |
-| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
+| Support Date | Model Name |
+| ------------ | -------------------------------------------------------------------- |
+| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / GLM-4.1V / InternLM 3 / MiniCPM-o-2.6 |
+| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
 
 ## Blogs
 
-- [Fine-tune Qwen2.5-VL for Autonomous Driving using LLaMA-Factory](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory) (Chinese)
-- 💡 [Easy Dataset × LLaMA Factory: Enabling LLMs to Efficiently Learn Domain Knowledge](https://buaa-act.feishu.cn/wiki/GVzlwYcRFiR8OLkHbL6cQpYin7g) (English)
+- [Fine-tune a mental health LLM using LLaMA-Factory](https://www.lab4ai.cn/project/detail?id=25cce32ec131497b9e06a93336a0817f&type=project&utm_source=LLaMA-Factory) (Chinese)
+- [Fine-tune GPT-OSS for Role-Playing using LLaMA-Factory](https://docs.llamafactory.com.cn/docs/documents/best-practice/gptroleplay/?utm_source=LLaMA-Factory) (Chinese)
+- [A One-Stop Code-Free Model Reinforcement Learning and Deployment Platform based on LLaMA-Factory and EasyR1](https://aws.amazon.com/cn/blogs/china/building-llm-model-hub-based-on-llamafactory-and-easyr1/) (Chinese)
+- [How Apoidea Group enhances visual information extraction from banking documents with multimodal models using LLaMA-Factory on Amazon SageMaker HyperPod](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/) (English)
+- [Easy Dataset × LLaMA Factory: Enabling LLMs to Efficiently Learn Domain Knowledge](https://buaa-act.feishu.cn/wiki/GVzlwYcRFiR8OLkHbL6cQpYin7g) (English)
 
 <details><summary>All Blogs</summary>
 
 - [Fine-tune Llama3.1-70B for Medical Diagnosis using LLaMA-Factory](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/?utm_source=LLaMA-Factory) (Chinese)
+- [Fine-tune Qwen2.5-VL for Autonomous Driving using LLaMA-Factory](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory) (Chinese)
 - [LLaMA Factory: Fine-tuning the DeepSeek-R1-Distill-Qwen-7B Model for News Classifier](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) (Chinese)
 - [A One-Stop Code-Free Model Fine-Tuning \& Deployment Platform based on SageMaker and LLaMA-Factory](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) (Chinese)
 - [LLaMA Factory Multi-Modal Fine-Tuning Practice: Fine-Tuning Qwen2-VL for Personal Tourist Guide](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) (Chinese)
-- [LLaMA Factory: Fine-tuning the LLaMA3 Model for Role-Playing](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) (Chinese)
+- [LLaMA Factory: Fine-tuning Llama3 for Role-Playing](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) (Chinese)
 
 </details>
 
 ## Changelog
 
+[25/08/22] We supported **[OFT](https://arxiv.org/abs/2306.07280)** and **[OFTv2](https://arxiv.org/abs/2506.19847)**. See [examples](examples/README.md) for usage.
+
+[25/08/20] We supported fine-tuning the **[Intern-S1-mini](https://huggingface.co/internlm/Intern-S1-mini)** models. See [PR #8976](https://github.com/hiyouga/LLaMA-Factory/pull/8976) to get started.
+
+[25/08/06] We supported fine-tuning the **[GPT-OSS](https://github.com/openai/gpt-oss)** models. See [PR #8826](https://github.com/hiyouga/LLaMA-Factory/pull/8826) to get started.
+
+<details><summary>Full Changelog</summary>
+
+[25/07/02] We supported fine-tuning the **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** model.
+
 [25/04/28] We supported fine-tuning the **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** model family.
 
 [25/04/21] We supported the **[Muon](https://github.com/KellerJordan/Muon)** optimizer. See [examples](examples/README.md) for usage. Thank [@tianshijing](https://github.com/tianshijing)'s PR.
@@ -130,8 +143,6 @@ Choose your path:
 
 [25/04/06] We supported fine-tuning the **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** model. See [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) to get started.
 
-<details><summary>Full Changelog</summary>
-
 [25/03/31] We supported fine-tuning the **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** model. See [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) to get started.
 
 [25/03/15] We supported **[SGLang](https://github.com/sgl-project/sglang)** as inference backend. Try `infer_backend: sglang` to accelerate inference.
@@ -251,60 +262,70 @@ Choose your path:
 
 ## Supported Models
 
-| Model | Model size | Template |
-| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
-| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
-| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
-| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
-| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
-| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
-| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
-| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
-| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
-| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
-| [Gemma 3](https://huggingface.co/google) | 1B/4B/12B/27B | gemma3/gemma (1B) |
-| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/THUDM) | 9B/32B | glm4/glmz1 |
-| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
-| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
-| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
-| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
-| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
-| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
-| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
-| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
-| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
-| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
-| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
-| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
-| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
-| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
-| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
-| [MiniCPM](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
-| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
-| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
-| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
-| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
-| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
-| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
-| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
-| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
-| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
-| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
-| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
-| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
-| [Qwen3 (MoE)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/235B | qwen3 |
-| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
-| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
-| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
-| [Seed Coder](https://huggingface.co/ByteDance-Seed) | 8B | seed_coder |
-| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
-| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
-| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
-| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
-| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
-| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
-| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
+| Model | Model size | Template |
+| ----------------------------------------------------------------- | -------------------------------- | -------------------- |
+| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
+| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
+| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
+| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
+| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
+| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
+| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
+| [ERNIE-4.5](https://huggingface.co/baidu) | 0.3B/21B/300B | ernie/ernie_nothink |
+| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
+| [Falcon-H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/34B | falcon_h1 |
+| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
+| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 270M/1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
+| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
+| [GLM-4.1V](https://huggingface.co/zai-org) | 9B | glm4v |
+| [GLM-4.5/GLM-4.5V](https://huggingface.co/zai-org) | 106B/355B | glm4_moe/glm4v_moe |
+| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
+| [GPT-OSS](https://huggingface.co/openai) | 20B/120B | gpt |
+| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
+| [Granite 4](https://huggingface.co/ibm-granite) | 7B | granite4 |
+| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
+| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
+| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
+| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
+| [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
+| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
+| [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 |
+| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
+| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
+| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
+| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
+| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
+| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
+| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
+| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
+| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
+| [MiniCPM 1-4.1](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
+| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
+| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
+| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
+| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
+| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
+| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
+| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
+| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
+| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
+| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
+| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
+| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
+| [Qwen3 (MoE/Instruct/Thinking/Next)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/80B/235B | qwen3/qwen3_nothink |
+| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
+| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
+| [Qwen3-Omni](https://huggingface.co/Qwen)* | 30B | qwen3_omni |
+| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
+| [Qwen3-VL](https://huggingface.co/Qwen)* | 235B | qwen3_vl |
+| [Seed (OSS/Coder)](https://huggingface.co/ByteDance-Seed) | 8B/36B | seed_oss/seed_coder |
+| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
+| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
+| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
+| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
+| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
+| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
+| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
 
 > [!NOTE]
 > For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
@@ -321,16 +342,16 @@ You also can add a custom chat template to [template.py](src/llamafactory/data/t
 
 ## Supported Training Approaches
 
-| Approach               | Full-tuning        | Freeze-tuning      | LoRA               | QLoRA              |
-| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
-| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
-| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| Approach               | Full-tuning        | Freeze-tuning      | LoRA               | QLoRA              | OFT                | QOFT               |
+| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
+| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
 
 > [!TIP]
 > The implementation details of PPO can be found in [this blog](https://newfacade.github.io/notes-on-reinforcement-learning/17-ppo-trl.html).
@@ -348,6 +369,11 @@ You also can add a custom chat template to [template.py](src/llamafactory/data/t
 - [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
 - [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
 - [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
+- [CCI3-HQ (zh)](https://huggingface.co/datasets/BAAI/CCI3-HQ)
+- [CCI3-Data (zh)](https://huggingface.co/datasets/BAAI/CCI3-Data)
+- [CCI4.0-M2-Base-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Base-v1)
+- [CCI4.0-M2-CoT-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-CoT-v1)
+- [CCI4.0-M2-Extra-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Extra-v1)
 - [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
 - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
 
@@ -385,6 +411,7 @@ You also can add a custom chat template to [template.py](src/llamafactory/data/t
 - [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
 - [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
 - [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
+- [Infinity Instruct (zh)](https://huggingface.co/datasets/BAAI/Infinity-Instruct)
 - [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
 - [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
 - [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
@@ -443,7 +470,7 @@ huggingface-cli login
 | python | 3.9 | 3.10 |
 | torch | 2.0.0 | 2.6.0 |
 | torchvision | 0.15.0 | 0.21.0 |
-| transformers | 4.45.0 | 4.50.0 |
+| transformers | 4.49.0 | 4.50.0 |
 | datasets | 2.16.0 | 3.2.0 |
 | accelerate | 0.34.0 | 1.2.1 |
 | peft | 0.14.0 | 0.15.1 |
@@ -461,14 +488,14 @@ huggingface-cli login
 
 \* *estimated*
 
-| Method                          | Bits |   7B  |  14B  |  30B  |   70B  |   `x`B  |
-| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
-| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
-| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
-| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
-| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
-| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
-| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
+| Method                              | Bits |   7B  |  14B  |  30B  |   70B  |   `x`B  |
+| ----------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
+| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
+| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
+| Freeze/LoRA/GaLore/APOLLO/BAdam/OFT | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
+| QLoRA / QOFT | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
+| QLoRA / QOFT | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
+| QLoRA / QOFT | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
 
 ## Getting Started
 
@@ -485,7 +512,7 @@ cd LLaMA-Factory
 pip install -e ".[torch,metrics]" --no-build-isolation
 ```
 
-Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, dev
+Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, openmind, swanlab, dev
 
 #### Install from Docker Image
 
@@ -620,7 +647,7 @@ Please refer to [data/README.md](data/README.md) for checking the details about
 > [!NOTE]
 > Please update `data/dataset_info.json` to use your custom dataset.
 
-You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)** or **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to create synthetic data for fine-tuning.
+You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)**, **[DataFlow](https://github.com/OpenDCAI/DataFlow)** and **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to create synthetic data for fine-tuning.
 
 ### Quickstart
 
@@ -645,6 +672,10 @@ See [examples/README.md](examples/README.md) for advanced usage (including distr
 llamafactory-cli webui
 ```
 
+### LLaMA Factory Online
+
+Read our [documentation](https://docs.llamafactory.com.cn/docs/documents/quickstart/getstarted/?utm_source=LLaMA-Factory).
+
 ### Build Docker
 
 For CUDA users:
````
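The README changes above promote OFT/QOFT to first-class alternatives to LoRA/QLoRA across every training approach. A hedged sketch of how such runs are launched with the CLI from the quickstart; the LoRA config path appears in the repository's examples, while the OFT file name below is hypothetical (the changelog defers to examples/README.md for the real configs):

```bash
# LoRA SFT quickstart as documented in the README:
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml

# An OFT run would swap in an OFT config; this path is illustrative only:
llamafactory-cli train examples/train_oft/llama3_oft_sft.yaml
```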
193
README_zh.md
@@ -5,16 +5,16 @@
|
||||
[](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
|
||||
[](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
|
||||
[](https://pypi.org/project/llamafactory/)
|
||||
[](https://scholar.google.com/scholar?cites=12620864006390196564)
|
||||
[](https://scholar.google.com/scholar?cites=12620864006390196564)
|
||||
[](https://hub.docker.com/r/hiyouga/llamafactory/tags)
|
||||
|
||||
[](https://twitter.com/llamafactory_ai)
|
||||
[](https://discord.gg/rKfvV9r9FK)
|
||||
[](https://gitcode.com/zhengyaowei/LLaMA-Factory)
|
||||
[](https://discord.gg/rKfvV9r9FK)
|
||||
|
||||
[](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
||||
[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
||||
[](https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory)
|
||||
[](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
||||
[](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
||||
[](https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory)
|
||||
[](https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory)
|
||||
[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
||||
[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
||||
[](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)
|
||||
@@ -25,13 +25,8 @@
|
||||
|
||||
### 赞助商 ❤️
|
||||
|
||||
<a href="https://warp.dev/llama-factory">
|
||||
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
|
||||
</a>
|
||||
|
||||
#### [Warp,面向开发者的智能终端](https://warp.dev/llama-factory)
|
||||
|
||||
[适用于 MacOS、Linux 和 Windows](https://warp.dev/llama-factory)
|
||||
| <div style="text-align: center;"><a href="https://warp.dev/llama-factory"><img alt="Warp sponsorship" width="400" src="assets/sponsors/warp.jpg"></a><br><a href="https://warp.dev/llama-factory" style="font-size:larger;">Warp,面向开发者的智能终端</a><br><a href="https://warp.dev/llama-factory">适用于 MacOS、Linux 和 Windows</a> | <a href="https://serpapi.com"><img alt="SerpAPI sponsorship" width="250" src="assets/sponsors/serpapi.svg"> </a> |
|
||||
| ---- | ---- |
|
||||
|
||||
----
|
||||
|
||||
@@ -41,7 +36,7 @@
|
||||
|
||||
</div>
|
||||
|
||||
👋 加入我们的[微信群](assets/wechat.jpg)、[NPU 用户群](assets/wechat_npu.jpg)或 [九章智算云算力优惠群](assets/wechat_alaya.png)。
|
||||
👋 加入我们的[微信群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/main.jpg)、[NPU 用户群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/npu.jpg)、[大模型实验室群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/lab4ai.jpg) 或 [LLaMA Factory Online 用户群](https://github.com/hiyouga/llamafactory-community/blob/main/wechat/online.png)。
|
||||
|
||||
\[ [English](README.md) | 中文 \]
|
||||
|
||||
@@ -52,12 +47,15 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
选择你的打开方式:
|
||||
|
||||
- **入门教程**:https://zhuanlan.zhihu.com/p/695287607
|
||||
- **微调视频教程**:https://www.bilibili.com/video/BV1djgRzxEts/
|
||||
- **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/
|
||||
- **框架文档(昇腾 NPU)**:https://ascend.github.io/docs/sources/llamafactory/
|
||||
- **Colab(免费)**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
|
||||
- **本地机器**:请见[如何使用](#如何使用)
|
||||
- **PAI-DSW(免费试用)**:https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
|
||||
- **九章智算云(算力优惠活动)**:https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory
|
||||
- **官方课程**:https://www.lab4ai.cn/course/detail?id=7c13e60f6137474eb40f6fd3983c0f46&utm_source=LLaMA-Factory
|
||||
- **LLaMA Factory Online(在线微调)**:https://www.llamafactory.com.cn/?utm_source=LLaMA-Factory
|
||||
|
||||
> [!NOTE]
|
||||
> 除上述链接以外的其他网站均为未经许可的第三方网站,请小心甄别。
|
||||
@@ -76,6 +74,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
- [数据准备](#数据准备)
|
||||
- [快速开始](#快速开始)
|
||||
- [LLaMA Board 可视化微调](#llama-board-可视化微调由-gradio-驱动)
|
||||
- [LLaMA Factory Online 在线微调](#llama-factory-online-在线微调)
|
||||
- [构建 Docker](#构建-docker)
|
||||
- [利用 vLLM 部署 OpenAI API](#利用-vllm-部署-openai-api)
|
||||
- [从魔搭社区下载](#从魔搭社区下载)
|
||||
@@ -92,7 +91,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen、Qwen2-VL、DeepSeek、Yi、Gemma、ChatGLM、Phi 等等。
|
||||
- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练、KTO 训练、ORPO 训练等等。
|
||||
- **多种精度**:16 比特全参数微调、冻结微调、LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ 的 2/3/4/5/6/8 比特 QLoRA 微调。
|
||||
- **先进算法**:[GaLore](https://github.com/jiaweizzhao/GaLore)、[BAdam](https://github.com/Ledzy/BAdam)、[APOLLO](https://github.com/zhuhanqing/APOLLO)、[Adam-mini](https://github.com/zyushun/Adam-mini)、[Muon](https://github.com/KellerJordan/Muon)、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 PiSSA。
|
||||
- **先进算法**:[GaLore](https://github.com/jiaweizzhao/GaLore)、[BAdam](https://github.com/Ledzy/BAdam)、[APOLLO](https://github.com/zhuhanqing/APOLLO)、[Adam-mini](https://github.com/zyushun/Adam-mini)、[Muon](https://github.com/KellerJordan/Muon)、[OFT](https://github.com/huggingface/peft/tree/main/src/peft/tuners/oft)、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ 和 PiSSA。
|
||||
- **实用技巧**:[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)、[Unsloth](https://github.com/unslothai/unsloth)、[Liger Kernel](https://github.com/linkedin/Liger-Kernel)、RoPE scaling、NEFTune 和 rsLoRA。
|
||||
- **广泛任务**:多轮对话、工具调用、图像理解、视觉定位、视频识别和语音理解等等。
|
||||
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow、[SwanLab](https://github.com/SwanHubX/SwanLab) 等等。
|
||||
@@ -100,28 +99,42 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
|
||||
### 最新模型的 Day-N 微调适配
|
||||
|
||||
| 适配时间 | 模型名称 |
|
||||
| ------------ | ------------------------------------------------------------ |
|
||||
| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / InternLM 3 / MiniCPM-o-2.6 |
|
||||
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
|
||||
| 适配时间 | 模型名称 |
|
||||
| ------------ | -------------------------------------------------------------------- |
|
||||
| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / GLM-4.1V / InternLM 3 / MiniCPM-o-2.6 |
|
||||
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |
|
||||
|
||||
## 官方博客
|
||||
|
||||
- [使用 LLaMA-Factory 微调 Qwen2.5-VL 实现自动驾驶场景微调](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory)(中文)
|
||||
- 💡 [Easy Dataset × LLaMA Factory: 让大模型高效学习领域知识](https://buaa-act.feishu.cn/wiki/KY9xwTGs1iqHrRkjXBwcZP9WnL9)(中文)
|
||||
- [使用 LLaMA-Factory 微调心理健康大模型](https://www.lab4ai.cn/project/detail?id=25cce32ec131497b9e06a93336a0817f&type=project&utm_source=LLaMA-Factory)(中文)
|
||||
- [使用 LLaMA-Factory 构建 GPT-OSS 角色扮演模型](https://docs.llamafactory.com.cn/docs/documents/best-practice/gptroleplay/?utm_source=LLaMA-Factory)(中文)
|
||||
- [基于 LLaMA-Factory 和 EasyR1 打造一站式无代码大模型强化学习和部署平台 LLM Model Hub](https://aws.amazon.com/cn/blogs/china/building-llm-model-hub-based-on-llamafactory-and-easyr1/)(中文)
|
||||
- [通过亚马逊 SageMaker HyperPod 上的 LLaMA-Factory 增强多模态模型银行文档的视觉信息提取](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/)(英文)
|
||||
- [Easy Dataset × LLaMA Factory: 让大模型高效学习领域知识](https://buaa-act.feishu.cn/wiki/KY9xwTGs1iqHrRkjXBwcZP9WnL9)(中文)
|
||||
|
||||
<details><summary>全部博客</summary>
|
||||
|
||||
- [使用 LLaMA-Factory 微调 Llama3.1-70B 医学诊断模型](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/?utm_source=LLaMA-Factory)(中文)
|
||||
- [使用 LLaMA-Factory 微调 Qwen2.5-VL 实现自动驾驶场景微调](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory)(中文)
|
||||
- [LLaMA Factory:微调 DeepSeek-R1-Distill-Qwen-7B 模型实现新闻标题分类器](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b)(中文)
|
||||
- [基于 Amazon SageMaker 和 LLaMA-Factory 打造一站式无代码模型微调部署平台 Model Hub](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/)(中文)
|
||||
- [LLaMA Factory 多模态微调实践:微调 Qwen2-VL 构建文旅大模型](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl)(中文)
|
||||
- [LLaMA Factory:微调LLaMA3模型实现角色扮演](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)(中文)
|
||||
- [LLaMA Factory:微调 Llama3 模型实现角色扮演](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)(中文)
|
||||
|
||||
</details>
|
||||
|
||||
## 更新日志
|
||||
|
||||
[25/08/22] 我们支持了 **[OFT](https://arxiv.org/abs/2306.07280)** 和 **[OFTv2](https://arxiv.org/abs/2506.19847)** 模型的微调。查看 [examples](examples/README.md) 以使用。
|
||||
|
||||
[25/08/20] 我们支持了 **[Intern-S1-mini](https://huggingface.co/internlm/Intern-S1-mini)** 模型的微调。查看 [PR #8976](https://github.com/hiyouga/LLaMA-Factory/pull/8976) 以使用。
|
||||
|
||||
[25/08/06] 我们支持了 **[GPT-OSS](https://github.com/openai/gpt-oss)** 模型的微调。查看 [PR #8826](https://github.com/hiyouga/LLaMA-Factory/pull/8826) 以使用。
|
||||
|
||||
<details><summary>展开日志</summary>
|
||||
|
||||
[25/07/02] 我们支持了 **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** 模型的微调。
|
||||
|
||||
[25/04/28] 我们支持了 **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** 系列模型的微调。
|
||||
|
||||
[25/04/21] 我们支持了 **[Muon](https://github.com/KellerJordan/Muon)** 优化器。详细用法请参照 [examples](examples/README_zh.md)。感谢 [@tianshijing](https://github.com/tianshijing) 的 PR。
|
||||
@@ -132,8 +145,6 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
|
||||
[25/04/06] 我们支持了 **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** 模型的微调。查看 [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) 以使用。
|
||||
|
||||
<details><summary>展开日志</summary>
|
||||
|
||||
[25/03/31] 我们支持了 **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** 模型的微调。查看 [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) 以使用。
|
||||
|
||||
[25/03/15] 我们支持了 **[SGLang](https://github.com/sgl-project/sglang)** 推理后端,请使用 `infer_backend: sglang` 启用。
|
||||
@@ -253,60 +264,70 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
|
||||
|
||||
## 模型
|
||||
|
||||
| 模型名 | 参数量 | Template |
|
||||
| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
|
||||
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
||||
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
||||
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
||||
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
||||
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
||||
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
|
||||
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
|
||||
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
||||
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
||||
| [Gemma 3](https://huggingface.co/google) | 1B/4B/12B/27B | gemma3/gemma (1B) |
|
||||
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/THUDM) | 9B/32B | glm4/glmz1 |
|
||||
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
|
||||
| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
|
||||
| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
|
||||
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
|
||||
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
|
||||
| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
|
||||
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
|
||||
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
||||
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
||||
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
|
||||
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
|
||||
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
|
||||
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
||||
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
|
||||
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
|
||||
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
|
||||
| [MiniCPM](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
|
||||
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
|
||||
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
|
||||
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
||||
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
|
||||
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
||||
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
|
||||
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
||||
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
|
||||
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
|
||||
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
|
||||
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
|
||||
| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
|
||||
| [Qwen3 (MoE)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/235B | qwen3 |
|
||||
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
|
||||
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
|
||||
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
|
||||
| [Seed Coder](https://huggingface.co/ByteDance-Seed) | 8B | seed_coder |
|
||||
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
|
||||
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
||||
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
|
||||
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
||||
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
|
||||
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
||||
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
||||
| 模型名 | 参数量 | Template |
|
||||
| ----------------------------------------------------------------- | -------------------------------- | -------------------- |
|
||||
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
||||
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
||||
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
||||
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
| [ERNIE-4.5](https://huggingface.co/baidu) | 0.3B/21B/300B | ernie/ernie_nothink |
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
| [Falcon-H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/34B | falcon_h1 |
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 270M/1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
| [GLM-4.1V](https://huggingface.co/zai-org) | 9B | glm4v |
| [GLM-4.5/GLM-4.5V](https://huggingface.co/zai-org) | 106B/355B | glm4_moe/glm4v_moe |
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
| [GPT-OSS](https://huggingface.co/openai) | 20B/120B | gpt |
| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
| [Granite 4](https://huggingface.co/ibm-granite) | 7B | granite4 |
| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
| [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
| [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 |
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
| [MiniCPM 1-4.1](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
| [Qwen3 (MoE/Instruct/Thinking/Next)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/80B/235B | qwen3/qwen3_nothink |
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
| [Qwen3-Omni](https://huggingface.co/Qwen)* | 30B | qwen3_omni |
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
| [Qwen3-VL](https://huggingface.co/Qwen)* | 235B | qwen3_vl |
| [Seed (OSS/Coder)](https://huggingface.co/ByteDance-Seed) | 8B/36B | seed_oss/seed_coder |
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |

> [!NOTE]
> For all "base" models, the `template` argument can be any value such as `default`, `alpaca` or `vicuna`. But make sure to use the **corresponding template** for "instruct/chat" models.
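For example, chatting with an instruct model means pinning its matching template explicitly (a minimal sketch; the model path is just one example of a Llama-3-Instruct checkpoint):

```bash
# Instruct/chat model: the template must match the model family (here, llama3).
# A base model could instead use a generic template such as `default` or `alpaca`.
llamafactory-cli chat \
  --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
  --template llama3
```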
@@ -350,6 +371,11 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc

- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
- [CCI3-HQ (zh)](https://huggingface.co/datasets/BAAI/CCI3-HQ)
- [CCI3-Data (zh)](https://huggingface.co/datasets/BAAI/CCI3-Data)
- [CCI4.0-M2-Base-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Base-v1)
- [CCI4.0-M2-CoT-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-CoT-v1)
- [CCI4.0-M2-Extra-v1 (en&zh)](https://huggingface.co/datasets/BAAI/CCI4.0-M2-Extra-v1)
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
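Any of these corpora can feed the pre-training stage through the same CLI used for fine-tuning (a sketch; the config path assumes the `examples/train_lora/llama3_lora_pretrain.yaml` file bundled with the repository):

```bash
# Continuous pre-training with LoRA on a plain-text corpus.
llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
```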
@@ -387,6 +413,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc

- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
- [Infinity Instruct (zh)](https://huggingface.co/datasets/BAAI/Infinity-Instruct)
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
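Once a dataset is registered in `data/dataset_info.json`, a training config refers to it by name (a sketch; `identity` and `alpaca_en_demo` are the demo entries shipped with the repository):

```yaml
# Excerpt from a SFT config: datasets are referenced by their registry names.
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
```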
@@ -445,7 +472,7 @@ huggingface-cli login

| python | 3.9 | 3.10 |
| torch | 2.0.0 | 2.6.0 |
| torchvision | 0.15.0 | 0.21.0 |
| transformers | 4.45.0 | 4.50.0 |
| transformers | 4.49.0 | 4.50.0 |
| datasets | 2.16.0 | 3.2.0 |
| accelerate | 0.34.0 | 1.2.1 |
| peft | 0.14.0 | 0.15.1 |
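A quick way to compare an existing environment against this table (a sketch; it simply prints whatever versions are installed):

```bash
python -c "import torch, torchvision, transformers, datasets, accelerate, peft; \
print(torch.__version__, torchvision.__version__, transformers.__version__, datasets.__version__, accelerate.__version__, peft.__version__)"
```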
@@ -487,7 +514,7 @@ cd LLaMA-Factory

pip install -e ".[torch,metrics]" --no-build-isolation
```
Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, modelscope, openmind, swanlab, dev

Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, openmind, swanlab, dev
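For instance, adding DeepSpeed and bitsandbytes support on top of the default install (a sketch; pick the extras matching your hardware and backends):

```bash
pip install -e ".[torch,metrics,deepspeed,bitsandbytes]" --no-build-isolation
```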
#### Install from Docker image
@@ -622,7 +649,7 @@ pip install .

> [!NOTE]
> Please update `data/dataset_info.json` when using a custom dataset.
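A minimal registry entry for a local alpaca-style file might look like this (a sketch; `my_dataset` and the file name are placeholders, and the column mapping follows the alpaca format documented in `data/README.md`):

```json
{
  "my_dataset": {
    "file_name": "my_dataset.json",
    "columns": {
      "prompt": "instruction",
      "query": "input",
      "response": "output"
    }
  }
}
```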
You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)** or **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to create synthetic data for fine-tuning.

You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)**, **[DataFlow](https://github.com/OpenDCAI/DataFlow)** and **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to create synthetic data for fine-tuning.
### Quickstart

@@ -647,6 +674,10 @@ llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml

llamafactory-cli webui
```
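For reference, the commands that typically precede the web UI launch in this quickstart run LoRA fine-tuning, inference and merging in turn (the export config appears in the hunk context above; the other two paths assume the repository's bundled `examples/` directory):

```bash
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml   # LoRA SFT
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml     # interactive chat
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml  # merge adapters
```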
### Fine-Tuning with LLaMA Factory Online

See [this documentation](https://docs.llamafactory.com.cn/docs/documents/quickstart/getstarted/?utm_source=LLaMA-Factory) for details.
### Build Docker

For CUDA users:
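The usual flow builds and enters the CUDA image via the compose file shipped with the repo (a sketch, assuming the `docker/docker-cuda/` layout and `llamafactory` service name used by the project):

```bash
cd docker/docker-cuda/
docker compose up -d
docker compose exec llamafactory bash
```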
1
assets/sponsors/serpapi.svg
Normal file
1
assets/thirdparty/colab.svg
vendored
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="117" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="117" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h30v20H0z"/><path fill="#007ec6" d="M30 0h87v20H30z"/><path fill="url(#b)" d="M0 0h117v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"><svg x="4px" y="0px" width="22px" height="20px" viewBox="-2 0 28 24" style="background-color: #fff;border-radius: 1px;"><path style="fill:#e8710a;" d="M1.977,16.77c-2.667-2.277-2.605-7.079,0-9.357C2.919,8.057,3.522,9.075,4.49,9.691c-1.152,1.6-1.146,3.201-0.004,4.803C3.522,15.111,2.918,16.126,1.977,16.77z"/><path style="fill:#f9ab00;" d="M12.257,17.114c-1.767-1.633-2.485-3.658-2.118-6.02c0.451-2.91,2.139-4.893,4.946-5.678c2.565-0.718,4.964-0.217,6.878,1.819c-0.884,0.743-1.707,1.547-2.434,2.446C18.488,8.827,17.319,8.435,16,8.856c-2.404,0.767-3.046,3.241-1.494,5.644c-0.241,0.275-0.493,0.541-0.721,0.826C13.295,15.939,12.511,16.3,12.257,17.114z"/><path style="fill:#e8710a;" d="M19.529,9.682c0.727-0.899,1.55-1.703,2.434-2.446c2.703,2.783,2.701,7.031-0.005,9.764c-2.648,2.674-6.936,2.725-9.701,0.115c0.254-0.814,1.038-1.175,1.528-1.788c0.228-0.285,0.48-0.552,0.721-0.826c1.053,0.916,2.254,1.268,3.6,0.83C20.502,14.551,21.151,11.927,19.529,9.682z"/><path style="fill:#f9ab00;" d="M4.49,9.691C3.522,9.075,2.919,8.057,1.977,7.413c2.209-2.398,5.721-2.942,8.476-1.355c0.555,0.32,0.719,0.606,0.285,1.128c-0.157,0.188-0.258,0.422-0.391,0.631c-0.299,0.47-0.509,1.067-0.929,1.371C8.933,9.539,8.523,8.847,8.021,8.746C6.673,8.475,5.509,8.787,4.49,9.691z"/><path style="fill:#f9ab00;" d="M1.977,16.77c0.941-0.644,1.545-1.659,2.509-2.277c1.373,1.152,2.85,1.433,4.45,0.499c0.332-0.194,0.503-0.088,0.673,0.19c0.386,0.635,0.753,1.285,1.181,1.89c0.34,0.48,0.222,0.715-0.253,1.006C7.84,19.73,4.205,19.188,1.977,16.77z"/></svg><text x="245" y="140" transform="scale(.1)" textLength="30"> </text><text x="725" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="770">Open in Colab</text><text x="725" y="140" transform="scale(.1)" textLength="770">Open in Colab</text></g> </svg>
1
assets/thirdparty/discord.svg
vendored
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="115" height="20" role="img" aria-label="LLaMA Factory"><title>LLaMA Factory</title><linearGradient id="s" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="r"><rect width="115" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#r)"><rect width="24" height="20" fill="#5865f2"/><rect x="24" width="91" height="20" fill="#555"/><rect width="115" height="20" fill="url(#s)"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="110"><image x="5" y="3" width="14" height="14" xlink:href="data:image/svg+xml;base64,PHN2ZyBmaWxsPSJ3aGl0ZSIgcm9sZT0iaW1nIiB2aWV3Qm94PSIwIDAgMjQgMjQiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHRpdGxlPkRpc2NvcmQ8L3RpdGxlPjxwYXRoIGQ9Ik0yMC4zMTcgNC4zNjk4YTE5Ljc5MTMgMTkuNzkxMyAwIDAwLTQuODg1MS0xLjUxNTIuMDc0MS4wNzQxIDAgMDAtLjA3ODUuMDM3MWMtLjIxMS4zNzUzLS40NDQ3Ljg2NDgtLjYwODMgMS4yNDk1LTEuODQ0Ny0uMjc2Mi0zLjY4LS4yNzYyLTUuNDg2OCAwLS4xNjM2LS4zOTMzLS40MDU4LS44NzQyLS42MTc3LTEuMjQ5NWEuMDc3LjA3NyAwIDAwLS4wNzg1LS4wMzcgMTkuNzM2MyAxOS43MzYzIDAgMDAtNC44ODUyIDEuNTE1LjA2OTkuMDY5OSAwIDAwLS4wMzIxLjAyNzdDLjUzMzQgOS4wNDU4LS4zMTkgMTMuNTc5OS4wOTkyIDE4LjA1NzhhLjA4MjQuMDgyNCAwIDAwLjAzMTIuMDU2MWMyLjA1MjggMS41MDc2IDQuMDQxMyAyLjQyMjggNS45OTI5IDMuMDI5NGEuMDc3Ny4wNzc3IDAgMDAuMDg0Mi0uMDI3NmMuNDYxNi0uNjMwNC44NzMxLTEuMjk1MiAxLjIyNi0xLjk5NDJhLjA3Ni4wNzYgMCAwMC0uMDQxNi0uMTA1N2MtLjY1MjgtLjI0NzYtMS4yNzQzLS41NDk1LTEuODcyMi0uODkyM2EuMDc3LjA3NyAwIDAxLS4wMDc2LS4xMjc3Yy4xMjU4LS4wOTQzLjI1MTctLjE5MjMuMzcxOC0uMjkxNGEuMDc0My4wNzQzIDAgMDEuMDc3Ni0uMDEwNWMzLjkyNzggMS43OTMzIDguMTggMS43OTMzIDEyLjA2MTQgMGEuMDczOS4wNzM5IDAgMDEuMDc4NS4wMDk1Yy4xMjAyLjA5OS4yNDYuMTk4MS4zNzI4LjI5MjRhLjA3Ny4wNzcgMCAwMS0uMDA2Ni4xMjc2IDEyLjI5ODYgMTIuMjk4NiAwIDAxLTEuODczLjg5MTQuMDc2Ni4wNzY2IDAgMDAtLjA0MDcuMTA2N2MuMzYwNC42OTguNzcxOSAxLjM2MjggMS4yMjUgMS45OTMyYS4wNzYuMDc2IDAgMDAuMDg0Mi4wMjg2YzEuOTYxLS42MDY3IDMuOTQ5NS0xLjUyMTkgNi4wMDIzLTMuMDI5NGEuMDc3LjA3NyAwIDAwLjAzMTMtLjA1NTJjLjUwMDQtNS4xNzctLjgzODItOS42NzM5LTMuNTQ4NS0xMy42NjA0YS4wNjEuMDYxIDAgMDAtLjAzMTItLjAyODZ6TTguMDIgMTUuMzMxMmMtMS4xODI1IDAtMi4xNTY5LTEuMDg1Ny0yLjE1NjktMi40MTkgMC0xLjMzMzIuOTU1NS0yLjQxODkgMi4xNTctMi40MTg5IDEuMjEwOCAwIDIuMTc1NyAxLjA5NTIgMi4xNTY4IDIuNDE5IDAgMS4zMzMyLS45NTU1IDIuNDE4OS0yLjE1NjkgMi40MTg5em03Ljk3NDggMGMtMS4xODI1IDAtMi4xNTY5LTEuMDg1Ny0yLjE1NjktMi40MTkgMC0xLjMzMzIuOTU1NC0yLjQxODkgMi4xNTY5LTIuNDE4OSAxLjIxMDggMCAyLjE3NTcgMS4wOTUyIDIuMTU2OCAyLjQxOSAwIDEuMzMzMi0uOTQ2IDIuNDE4OS0yLjE1NjggMi40MTg5WiIvPjwvc3ZnPg=="/><text aria-hidden="true" x="685" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="810">LLaMA Factory</text><text x="685" y="140" transform="scale(.1)" fill="#fff" textLength="810">LLaMA Factory</text></g></svg>
92
assets/thirdparty/dsw.svg
vendored
Normal file
@@ -0,0 +1,92 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="130px" height="20px" viewBox="0 0 130 20" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
    <title>最终方案备份 6</title>
    <!-- "Open in PAI-DSW" badge: orange gradient background, PAI icon, and the badge text outlined as <path> definitions (vector path data omitted) -->
</svg>
536
assets/thirdparty/lab4ai.svg
vendored
Normal file
@@ -0,0 +1,536 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 24.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="125" height="20">
    <g>
        <g>
            <defs>
                <polygon id="SVGID_1_" points="0,0 126.7,0 126.7,21.5 0,21.5 0,0 "/>
            </defs>
            <clipPath id="SVGID_2_">
                <use xlink:href="#SVGID_1_" style="overflow:visible;"/>
            </clipPath>
            <g style="clip-path:url(#SVGID_2_);">
                <!-- lab4ai badge: a clipped 874x148 bitmap embedded as a base64-encoded PNG (image data omitted) -->
                <image style="overflow:visible;" width="874" height="148" xlink:href="data:image/png;base64,…"/>
|
||||
gCVzx07eSLAjAeDFt3nwGT+C2VRNrp1tgShT88iAb8K8I7+2Nsqj/iEB+unN3If3y4y4rb3YfyRI
|
||||
8mGjZ4gIjuWDmolpajb60roWLRpR1FsioYHyxoFHmqRIt5Qq4My5PHieENw80guNdZz02KLmpgAF
|
||||
ILFMI3MFy0TSD2mjgCUW6STfwrE8T5K05QFpg5emzQPO1GuKwNeQ5LuCaj9dd0cSdEUYtJvI2ffb
|
||||
r6UoSvx7xZTerKAn1ndPvjsbp7pKeE4zj0b72vqzaKxyE5pf7ZqJA+cq7L5s5vlE/r60phkrZ3VP
|
||||
eJvOqBw2kmb/w3MUDdJ+f6QtO9tbjCcZaf2L24565l37wlXNOMEIX0d/BIPDIdcA23myGq8dbEBd
|
||||
eRNqM0gBIPabPWqk10TlbfYq5y//+OgEK4/zlx8lZaYk05SclV4mRZH059PK5f4T3Bzx3f38pTWS
|
||||
R5kfqFDofHoxblgF3HQFv8+SJFa85LtCq+tbPuQkKFdmpCQc0qo3/c6qC4DLlwGrl3KTmRIfFsZU
|
||||
TxJCTjTzKHA7DwUnliTEkXDrRwu577ipvcvR9UngGR2n9azDjNT+YpO/RQoS3PUs3ieZvPUOBrtP
|
||||
MiumfH8f+/Bv2p9BP40VdD/0rIxnoB0CLQql6h+K8Pr3jwHrVgJ3X8Of+2R9RGbUdA8vmIseovbj
|
||||
Y5/jhbQ5hgl4lvpSHCuH2fz1Tz/mfrGUj5HmaEpRkGxuofluqzmH0TyRL/B7/+meE9pP/ot+oxDT
|
||||
wt2J1tw+FydauGUJjTu/C21ez2qQ5yndMyAxJu7m+XeqqJFxcXjJDT/RFTUMRY0YRTW3vBQ59oXj
|
||||
W4ovb/xNW4o1b2xNpyS25cQlEU0irnlSvJIDjC/uX/CXCIfDiBglxATICCKRMNua+8wt3x9COGQW
|
||||
dmxIZcXYqkb4f5W2dH8K/6wonLRZ92sRuL1NtXjg0ZUYGA17BhG5dHYPHvrKhy6ftBPt5bjv4VXo
|
||||
GCiy96LZtFXFUfz7Pfsxv24A0Zjij4H7gZj7TUkw/jhp1xXUV46grMiufx8eDeF3n7gY75+qdceX
|
||||
ZSfT8Q9/7UNcOsebZG49Uo9/efECHGsrs2t1FZ7SYNG0AfztXR9j9dwuhNVg4QotMmYnbbCldNDM
|
||||
bUxjW/Z2HaUyyrf/vOkGHGybLmedbBD8ukR+MfpMQgKZ2fQPcrJGgnY2NVDpsGYF8MefA+Y3pj/2
|
||||
33/JBTDKZZTsvoiokvats5ebFJKgRsKE1znjBcrjM6chkTtu6Vz+YqcXM5FhImlEJDX5lpaQCIz6
|
||||
aq4JoWeMQBosImz0fLnM1PIYNRX8PirZK3hmPdfc0D3QXGEJ8hISEgFpmiB7Jv7WzPRSlmtWzNBw
|
||||
6FbRRvnfWmzzBz9deD2dFnbSOmcGbpu8n96Vq3DaUNPtmjUzqqQV/MLab1hLKjyqo25pyOhcw+FP
|
||||
iUfZNNqJ7VfMCCKWiSTPAxfCDzbPxYAW9myzsKrjG+tPoZx8tXQ7c3p821x0DBW5VZsw+hsDw2F8
|
||||
95fLxk6B9QA8mh0XG1WxuLEPD244jXWLE57ZxaEYfm/jaez6cTVixEYdprQDjNQ9smUe/v2L+xkZ
|
||||
dJOutQs72fU6cZ6R1J6BcMK3TefaOiJzr+yfjnk1A5hR5V+q1/UkyxrxgCe6/RnQdHvSc12aCmQT
|
||||
9PJvac+7adX/wUnGgnhfpB3MJxgJggXtHmnQJCQksoPzXbzsO1rY90ELTVQkJCRywN28uJzjs1dg
|
||||
yDhf0J3CuiDIKh4kLv4dCleAtedgE/NxqaZvm+bIz6UkiqniSuzXDA0jN5FU7SaRJnF77eA0bDtW
|
||||
x/3eHO1N2qMbLmzDtUvPu0TH907U4qV90+052RzEalRTGMEpcZEq2+hQkMiK7jjG4p5eed88rycc
|
||||
23mqDkeersC/fHYf1i5KhNK7an4H7rykDb/ZO4ObdIrEjZ275Ug9Xj/UgJuWtXgQ2Bg+saoZu09X
|
||||
4cBgJWLmuVRHxYxE+eLeBly5oAMby0ZQHI6lJmmufhdWOpxjwMrX5xgfohmlJG2TdD7NMBqhhISE
|
||||
hISEhES2SRu8o0c6POAU0QROt0s1ChDQdTlvW093RI80CpG1OIGz9msG9bIRN1P1liBuMP3WNNNE
|
||||
NOHnRmaNj22bhygjVy5fNiOIyCgeWH8KIcVhZhhV8fMdszAQC3kHgBHvJpaEXOm+x5E/OP3UKHn2
|
||||
SBF+9t5srJ7bjZIIZ3PhUAxfu+YUth+txfn+Ik7chB+kgCoPbZ2Dy+d1oLbMnaRmRWMPbltxDq29
|
||||
xWgzw/xbLnDUhp0DYTy7uxFLp/dgQf2AL+HZaRaZGPL2/uZHmGTdSd4FuicxKefTYMRNNpuEhISE
|
||||
hIREhrJGKp82L1kjHDcNU2Fq10zhVXeTOVvCYVOQ1tPL9vnXaMJ96JpA2jQr8XYiAbdlbpowkeSm
|
||||
kYYeTaFjuEZNV5W4iaThw6fw3/nNnlnY11KZ0DiJjcTK76xqwkUzukzTy8QB3QMRwxRQ05KQMCXJ
|
||||
aFACSKEexya1kkxiOmmYLJ6rRHd/GMVVI/H9S6f14FOrmvHw9vmJyyuJc/Y3V7G2mYmvXHXC80Zu
|
||||
vagV7xyrQddALaKjqque7x2rxbvHpqG+rBnlRSO+qbp1HVuwETHpuu7sf2ErtSuStSF/oxFKSEhI
|
||||
SEhI5K+MoXswNN1D04akedoc+cS8kiMno33ch8txsKIUxBJ0QoumCtoWLf5Zs2laHJq2uDbOMpVS
|
||||
zM9C8BVWOgeL8Ysds7yX5dkhc2uGcO/Vp+JfiprN0kgU1SUjieZMRs8VD4ruB0nInZKKrHlcm+pX
|
||||
URxFUThmmpYmcA+7t5f3TcOp7vKEts28BpHRx9+agxsuaMXs2n7XdRur+nHHxS040VmOM+0l9t9m
|
||||
5w7FVGw+XIsr5rdhYf1wapImknXYCZu9sH7X9LhWzQhMIhSvSJMSk2Q+zdO8XxISEhISEhKTmLSZ
|
||||
QoiNrFkxNhyRzG3JtdMWG2nT49qkQjMUIsFcU/WEdoX+1mBEDoyxD6qmImRED2R/K4zWKfx44zz2
|
||||
OUaJ1tQQv39jP1eD6TyVm0mCdbx/fBrOdJQmeJzQTJRI+96rTmJ6xYBntLbSoig2LGnDB2fqDL83
|
||||
PZaEeOken1N9l4aEeV5f8f7OChZ6/YWtjLiNuO6jvnQQX1t/Ev/9+Yu435x4OmvCc33FeHT7fPzN
|
||||
Hd4RG6678By2H61DR990DFgpAATSd+RcBU6z9p1VPYBISPMk5k7B3J7qwYwaGY8eyfdprK1jGidw
|
||||
RtGtrRTUJzNpC5rjSI4FCQkJCQkJibGTNsStHJ3pxezJtVPK7rrdv03xWzfv5NL5I6AJ/mq6mvBn
|
||||
01VB42IWxpY0xjiImGmMjWkq900zQvsbLIVC/5NQz1MAaFBgBfw/3FbOCJcCfdRRAUZ2ZlUOYsPi
|
||||
Npd2SsRnLztlJJr+zd45GA6HXB3l1JJ6DRBXN3ho/JJ+r6QWciPsbu9Y0YTPXUEZHDVPIfaWZU14
|
||||
ce9M7KAUAB7E87d7G3Eru8bl89yhBMvCw1i/5Dw+PFuDgZFS2z3Rx47+IsPvbXgUnuH/3fnXdDtp
|
||||
A+JaU01LhP23NK6a4OumCZpVCTn3Sp82CQkJCQkJiQy4m40vefC4pLJGwjzSXD7WBSNKRWB+ca2a
|
||||
KLnqSaR5RcnvRiMBnHzRGEnj5IwRsxiRMvZZiRnEzNqvx/crRsJo0sLYjSBhmFiCkQa6hmqq2qhp
|
||||
ugcVe1MImqua0hFUlwxxFWgSlIZH8d1bDmDNovM41FpjnqsnqLSDmCnWP5puo91WImsjgIp5tpkG
|
||||
PJ6DTbGuq5upCgAhAbZ5l+bNGASHtc/Sxl5suOC8oeVKxj3LI1F8Zc1x7GuqxgAjtoojouVgLIQn
|
||||
3l2IFTM7jXQBTjSW96NEjbmIJF0nxvpvZFQ1+sSrHZ2J0TlBcxI4wVRWc5hExmKJbUyaR07qiTQg
|
||||
C5OaNgkJCQkJCYkgbE13CBy6ZbqD5D5tsGnadFE3B5/mkeYPOH3aAhG2IInBsi+gGam4SKuiqnHB
|
||||
XTN82VTTLE4ztGfcLE4TNG3snjXN1LQlrqkaOcm4xk0xTSRrSqO82Ty0Wd1DEXT2hxkxG05b32sX
|
||||
txglX6GlyXG9ZkELrlvSipcOzuRaR7E9GOHafrQeWz6ejhuXnnWd2zkYwUhMsY8/808ii0UqTz6o
|
||||
JamELj4YXmQNiWAjFlkj00gygU2YRSY0bVK9IiFVbRISEhISEhJBSZvnZw/VWiJ3sMOnzQhaaGrS
|
||||
FIc8bQa758U8JiE3mxobY7+lt0lAEc0pTUFb8bTFG3/iFte0KUoiyASRMkurpsY4SbO0cKOJ4I9G
|
||||
CVvZD4RGJV84VTXaQlG5geSC2h6u9fIIznK2uww7Ttbh7pVTI4vlfVcdwTZGznq0IntOONYuUdbu
|
||||
j21fiCvmnkNZJIqQaepIZqm7Tlejezhiyw9njC/GkevKhlBfPoCQMpqctOm6Y5sga4j7spmETfxs
|
||||
aNiIvMUQG+WaNuN7OfVM+vnUz7EaIMeChISEhISEhE/Zwe3j5BVj0JaiyvF92ArnbzN99CqCCSWs
|
||||
FAHxLNuWJA0h1KGYQVpkkk5t3PgTNx3FTCgfMcwgEyaSikG2LK0aCetkDhgPLGKZRdLfMaveVk42
|
||||
xQj5r5smhIquGDnaVjSeZ6RiGOf7SxKmVCb5oEuQWeBlc85hZlXfpB+si+vb8ZnVJ/DYu0vjJpnC
|
||||
6gA+aq3G07vn4dZlpzCjcsDYve34LGw/PgM9g0WuMUakjaJOzqjsY6Qt6qnts6cZFImaW+NG/R4z
|
||||
CZulaeNbRtqIuJOprDSPnLyTqQz5LyEhISEhIZFFtiEKGLptH0wDx4S1YzwQifl9IoqkGD3SJGJx
|
||||
LZruSA5gFYOo6QKBs8L46dzBSBGIXBISFg9QMsHEbUQvRQmGEyZx5KvG/lENTRsT0o0okFzTFosh
|
||||
roUUtW3GPYS4ho2bWCo8EqXKCRudX1/Sg8vnteHlA3NddSDTyhOdlfjb5y7DH127D8sbO1wJticb
|
||||
7rmctG0zcPh8tSsoCbXp07vmYXZ1DyKhEXxwqoERvAtxor0iQcisIaJyondhQycqIoMGsUr1vNiI
|
||||
WzyRuhA90owUGYubR+oJ4mb4s8UwErWiS8ppaBJPrZK0SUhISEhISGSZsOl2IcIka7p4jOjTBti8
|
||||
1wTSpts0aIqqO4gbYKd9Dk2bWMG4KaBJ4pCCmHkSt/FBVK9AMTrtvmpGGgAS2BUj5D8J7CojUVyz
|
||||
ZkaEZH/ESJMWU+IkIBSyfOIUI/m20TSKahBBMpe85cITeO3gHIxaJpKi1pKRgL0tdfjTX63FuoXn
|
||||
sKC+2yB7YkhQzSK6JlGmz5pBspV4zBGLVDpbOqabmj/FTISumInCSSOoKkgQdn5N1emP6LgofW1k
|
||||
O1Ac3wvaVU3nW9V5KfZ3USiGkvAoPAORsuu29JbhsbeX4aniRfiYEbteMosU7X2VxLVmVfXjyrkt
|
||||
qCwaMkwXvYVr3fUIOXOzWeH+YzHLRNIkbYamLWaaSGroHSlBNKa6rikxSaZXXYyVm34ylqRNQkJC
|
||||
QkJCwhd3c/9h91ezrME03Rb636Vpiwu3AjFT4lEkYV9S1nU3FfT0olMEYmbtUuJcU7F9P/4+bYNa
|
||||
LcrUk1B102+NMQwS0BXTBDKmcqJDrlWKpWkzzSLp+1FDaFM5YWP/hVSuQ9QMvzbuF6eaEShXTGvC
|
||||
dRecxmtH59GJ7txn7Df6oxG8emQ2cHQ2dOE7JYlwqTiaF6K5oUDgFLFHTNLmcDOMK03pmloy7qwk
|
||||
SJuwKGD7LVvddHudRHNIxWxTI6ed9XPmOZSL7nBbFQ9u40HW4tdi2+uXnMbc6k72R5SRU6QlbTaT
|
||||
SAhmkVoipL8matgsnzaNk7f+kQhGNVUK6pOYtAUmeXIsSEhISEhISKQgbboo0Do4lC5wL1ueNsGF
|
||||
x6Vpi4fzFyR6RTw5bhopfLa2guObbgYs4WHprXXrNATNS/LJMZFrH25EXbFukCtDWxUztW1WWP/R
|
||||
mGG9p5raLLFQfUOUz80wiWTETQ2xzxr3f1NVbmap8qAmnOzFcO/qD3GgtQ5neysSNpYicaPPpoWf
|
||||
4tyfTPnl0FYpHjFeRJIGR/MrcBB+L5LopSyFkFpAPMaje+2BaYSdauIaznvQTSJrq5yooGXnXj77
|
||||
HK5fdAKVkX7ERp3aNN1zeNk1bIhHjRTzs1nJ1S1zyVEaB+zzaCyG831VGGLkWgrqEtI8UkJCQkJC
|
||||
QiIpj3GLzq7jdJtSzBnyP6FcsPu0mRey5WFTxFTd1t+Oz2ZSYsUMdW/P/wWH2sWUvG12cWkyN+eQ
|
||||
uO3vvhKLpz1vmhUq3CwypiQCjRh2gErCPJL+GRWEf9KuWYJbCIbGjkwiVdPUUNW4KaVimiY2lrfj
|
||||
W+vew/96cx06hkoSxM2zV2FXg3mRKD3JqFCQ3D1QSXKMnr47XAm39TR18FNfYfwmPcRJAllbL6zr
|
||||
wudW7jXaVPRlc5lCOv3Z4MzLZjePtFI7xM0kTbJmkDZG3g63NxomklJOn5zYcwT4q+/rKClKf+yp
|
||||
VqB/WEb8l5CQkJCQkIQtNWXTvawTxeAkekLxJcaMFAPoJUhbsmxuhlZNS/i4URQGUdtGdm4qt2Mz
|
||||
8rWRdk2xwv+LpA92okaVUHyEHcmhRNTa34BoQz27hXZO2mImeVO41s3iVKNCXXTVbESzPWgbIm2b
|
||||
EXVSjWvaVNXyf4NA3BSsnHYMf7Y+iu+/swZn+6sNU0BDo6Q4mikZKVJ8tIvuJkuuPHF6wPb2Imyp
|
||||
yJqf4511VFL8rqDmvKihDfet3oll01qMLOdRLdVz5Eys7VPb5gxEwrZDUQUnuhoxMFIsfdomKbp6
|
||||
eZGQkJCQkJCQCMDcUlI2b8Jm+bNZVo2abb9uRL7T4ts4aYPA5BQh5KRF0gxtmkDYdIPI0QVUIVW3
|
||||
o9APKGrCRDIJceN/KWkaITcatzMjl2NB+CUzCAdxUMWIGqkIwS4MBduo1YgmQQ1xs0jaUiJtImlG
|
||||
UUzCpvDk2jxnmxLX5hFWTDuBv7muG7/YtxrvnF6AYS3CIyOKys10t5xMmxZEe+Zh2uj5G8mukez7
|
||||
IORNSVNHlX9H6wKVRcNYN/847r5wL2ZWdBljM+ojSZZoGhlf79DFqJH2oCSiaWRMCP1/rGsROocq
|
||||
jSA1EhISEhISEhISkqilJGuiIOokbKKSLM69NJO8aXHXNLd5pEPTplgMTxWCkkAkcpqxJT8ug8wJ
|
||||
GjfjhwyyoybYo6VVS+IfpaeRg3MlJm9uvg33zdvKbnPQEMzjVbL8qmJ8G7IE/pAp2OshI/BIyIi2
|
||||
qMW1a/HP5CdnaN00M8KjEieBhPqSNnzzildw7fwFePv0Qhxom4Wm7mpoISX++4qfDAg+FT5xP7Ek
|
||||
7e9JwkQ/NfHQVNo+xeOayX4rGdkUxndEiTGC1oPlDc1YM/cYltQ3ozgUZSQ6wCPllVgbuou0uUwk
|
||||
49EjGTmM6XjrzEqcH6iSWjYJCQkJCQkJiSlP2fR0AqhDAPYgbNCsZGxxrVs8CImlabO0bRZps0vP
|
||||
pmZNsTM+RTHVc4rg12bzdTMD01s+bBYRNM5T7cTNGbFC94q2ofhvmAwxHIvg2PB1WFL0vBHD3ohq
|
||||
SK5sMbvQT2TNqocR4l/n2jZNt0wiE1o21TSTjGvYrM/x21L+f/buJTauq47j+P/c64kdJ6nzImlV
|
||||
ER6FUCG6atW00ECbBoIQSEhIXSDUBQvKggWPsmHVSiwAIdFdkSqgAglQVFWqRBVBmzbkIUQAQR+L
|
||||
qqRKWprYsRO7Tho7Y8+cw73nnHvvuY8Zj/NoHPh+pOTOy3NnRrE0v/z/53/y4/YNx+RjG4/Jmfkx
|
||||
mXx3LAkFY3YT6cWOq9SV2hv9Z6Z86Ai3BciHUPrtAPIc5Pea6+piHaEyxWeaP4dpWOoWpOzyOsXm
|
||||
Als+HTLc66801SZ4ZSZ87cb/bfK03GoZWTtyUTaPnpcb18zKhpHzSUh2CXpxWYGtHtzKLZLSsK4t
|
||||
G0biL6dVttlbZCJtp9WRsIoJAAAAvYNaOVcV9/UIbH4PsCxzuXZIHVTjgtCWXJ9TokdNVkXL2hsl
|
||||
HP1fPKESXa64+cpSMRc/mFqYthT64CZ999LuUW6pxYMr69Cp+2XbB/8mw3rSBwhdfMZxNSC7aZM6
|
||||
jiXWxlbSbECzlTbtg5sOQlsR3kqBrVJ5GxualrH10/LR9fx7b5KuN9Td5fzOmHpwCxd1mt57tblK
|
||||
W7HZ9lxnRA68dYdMXVjv9soAAADA/3tCG+x+U57zYcLAVglqpuextE9bNJM8zWi+R5vyD1BB4vOp
|
||||
L09/aVDLbhO/kVk2LSLYO84V3lxwM8bdr0qbfTXs3tz4xq9OcEvXlD078ZB8aetPk5c4XzqPsR9m
|
||||
ui4vXcMW28/EbUht7L5uUXX4iL+crWHLLvcLbKrh/SrFuqnl/ceG6Xt7GNxKbZJ+b7ZSiNPFfm2L
|
||||
Wsmf3vy0nJi9WTpaCVU2AAAAAtsSX0wrjwuGjlQqbSbMVhIci9Cmk+NiEdpUPG3M4s0iupT+8mpa
|
||||
+kNpqSNdt5Yf3Vo2E3VtEDHajcY3wSCPfJ8w+33XB7q0Jc8Us+zLAUX1GZ159YLMxLub5c9DX5dd
|
||||
mx5378tvEhYFi+2yL/bpbe6YBDXtQpwd89/1bZLBOjY3rCVsiZRKcGt+T4S2KxPaSsFNKgGuUmkL
|
||||
q2xZm+TR05+SVya3y9xCS4qN4wAAAEBo63+/6TGEJFuKlnc29qmyJV9IO8l303NFaJP4pOj2bcX4
|
||||
fjfmP1/f5oOaDWbZ0S2YEjsj3y8GM74d0GUyN4lSsm0A7F5uqhTe3BsKRwiaPgHm6lY5Xp/5SHKK
|
||||
h+Qzm5+Ulp5zIS1SRfgMQpvOWyOjIqiJKqptfnpk1h5ZCmyVitsgWZQQN3hIa/qdMtIwjKSyrk37
|
||||
8rNdz5YE8n9M3i0H375TZtujBDYAAAAM+H20UmmrVNlMvgwtCGhS6W60g0i66X1tZfRUHtqSsHXO
|
||||
ZOvXRPv2SF20R0p4TBcXZe2OvsrWa2h/nsNMPmhChZtuVWe+N4QTY967wPJaEtwm5x6WL3/gCRk1
|
||||
48nbjcT1m2Zf7NNNs9OgZvxm2sZuE5Dty1aENl0aRNIY1mSwvcMJbJcX3EzP4NY8iKTdXS37T+6W
|
||||
lyY/LvOdYQIbAAAA+oSzpi+f/aZGFqEtr6hJ1+Yq8X+Mya93krsv5KFNqda/ki+tD7gvqNnea11X
|
||||
RYt8cLFVNuWCTOTvU5KNKMxDmMmWpeUVtuwGdzR5xS0MME2h7dqElbPz6+Q3//6W3P/+/fLh4cMS
|
||||
mwX7PtLwGNm1eWnrXBbUdBDUsjVtqlxhk+aK26ChTAmhrfnXZOnQ1jiQJKuwSfm6NrG8PXeL7H9r
|
||||
p/zn/E3StYG9ywcNAACA5YW3ILiZSmtk3hIZVNeMlAObPWp7vJBkkON5aDNm+JfJIx5NAkmrmBCp
|
||||
XNUtq7Cp4kmVKapr9uhHuCub2GK/oXYs5Qpbbah8tu22VCtu1zqoLHRi2Xf8c7J21T2yZ9s+uan1
|
||||
ssR6wW1T5zfNtoVIuyl3ObSllbfqiP+mCZI9QxqVtUv7dVmi8lZtjSwCWyST7W1y6NTdcmxmmyx0
|
||||
W/7fKIENAAAAg8Q20zO0Na5nq7VHVgJbftRTEg0fkDApve+OJ/9u9MXbVRK6RA2JPUbpcchdj/wx
|
||||
vZ7fHhfHyB3TSp17jsjdnlXW8qPyG2/Xw1pRhlp5weXWjcfltk0vyfrWaVkdTcuQzBdVtfQVR8E6
|
||||
tmBCZK09csnKGq7Q/3E0hjZtWnKhu0FmFzbKiXPb5OWp7TJzcZ0N3wAAAMBlfxNtGEJielbauqVq
|
||||
mzGdrNK2kHxz/cOpF+/5Sva0dnNtNbT6adOeu91tkO03yvZPqGyfpSrWr5l63cxVniqVtvQF2oAW
|
||||
ub3d7CjJyK+Xa6iumZUb3F47u83+wf8i1q0BAADgygQ206PKVhrzn69r69bWtRmdBjc9bSQ6Gj71
|
||||
kHvOkV8kdz6ShKaW+0El2R5srvUxa4lULqBFUgluRYJMA5u7HLlKW7rXWb6PW+QDmioFNJMFtVJw
|
||||
64WqCAAAAIBrEMxqd5vKY8uTI+tDSIwLZ5JPiayvaTN6XFT8bGMC2rLj9z/RC9Pfd22PseTHyLdL
|
||||
9miJLD02u2zbIf0fN1ux2IDb35afvjZZkVAGAAAA4PoJdaYU1oIAV9ujrTqERFcD2ztJPvrV+IF7
|
||||
vxueZShPbzdseFTOzHwjeaKxfBqkbYfsBuND6i2RdkqkP9r92dL2Sr++TYxbw2Z8lU18lU3lYc1X
|
||||
17JqmyK4AQAAALh+Als5qJVvtxW1SrUtXNNWGUaSXNWvqOGRH1fPlIe208/tubBlx2+f0AszD4d7
|
||||
rxVtkMqubytuM379Wtgamb6I2L4I5Sttxq9lywJcFgalaXqkyS4R2gAAAACs5Mhm6gFu2Rtrhy2S
|
||||
ZlJFQ/vGn995umdos2Hphk2PqOn5zxvd/oTJw5PKA1s4sD8MbCpLjbY9UttWSRNU2oohJFmLZFBx
|
||||
sycuB7ji7RPeAAAAAKysuNZ43YStkvVBJPUWybDSptvJIw6pdRt+1nTGWirasuOZrXpx/NXkBzen
|
||||
Qau0Vi0b5Z+1P+Zr2aLS/eLXtKlwDVu2vk0Vm3HX2yFru0/zbwIAAADACspsPUJbQ5XN1IKbbtqv
|
||||
LU1vR2T16IMTz9/75kChLbX1rqfv7LTHX0wi1mg5uEWVwBY13B75NsjyHm2lYSThmraGl8Em0wAA
|
||||
AABWdnbrE96a2iNLEyTDzbXNqypa883xQ7uO9DpXz3R04117H1xsTz1eDm5+37VqdU3C8BZW1iIf
|
||||
wJoDW709csmXBQAAAAArIbZVU5y/dan2yGJ9W3LpDRWP/mDi4O69/c7UNx1t/eRT93UvntlbtEpm
|
||||
Aa0a2Kr3RUFlrRzYmtsje70cwhsAAACA6yC0Ddge6adKdo1R/4zjtd8ZP7z78FJnWjIVpWvcTGfq
|
||||
oNHt7UX1LMoradXA1tQamVfVwsBmz77EmjahVRIAAADACotrPVsjRWpVNh/kjA9sSXBri8QvRPHY
|
||||
t8cP73p9kPMNlIi2fvaPa/Ts2cf04jtfTX5gtKmy5qpo5cCmGtew1QeQ9G6TvKSXCwAAAABXKqIt
|
||||
leAqjw4rayLlSptMSjTyczGrHjv9lz0zg76CZaWgrTuf+ZBpn/+RXjz3QBHMytMhi6mR/Vojg1OX
|
||||
gpoirAEAAAC4jsJb09q2IrD5qtysxMNPRSPrfjjxwu4Tyz3zJaWhdLqk0XPf0535XSKdzQMNHOk7
|
||||
6r//y6FFEgAAAMA1jWxmwNBWtER2JIpPSjT8XKyGfz1+5AuHLvXcl52GbIAzi18zev6+5OmM6bZv
|
||||
TTJWq3db5FItkeq9eukAAAAAMEAQG+Rx6oKKWm+kR1FDf9Xxqt9NHf7iUT5LAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAFfIfwUYAA5mpSOKNj+qAAAAAElFTkSuQmCC" transform="matrix(0.145 0 0 0.145 0 0)">
|
||||
</image>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
After Width: | Height: | Size: 40 KiB
789
assets/thirdparty/online.svg
vendored
Normal file
@@ -0,0 +1,789 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 24.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="190" height="20">
<style type="text/css">
	.st0{clip-path:url(#SVGID_2_);}
</style>
<g>
	<g>
		<g>
			<defs>
				<rect id="SVGID_1_" width="188.5" height="21.4"/>
			</defs>
			<clipPath id="SVGID_2_">
				<use xlink:href="#SVGID_1_" style="overflow:visible;"/>
			</clipPath>
			<g class="st0">
			<image style="overflow:visible;enable-background:new ;" width="1315" height="153" xlink:href="data:image/png;base64,[base64-encoded PNG data omitted]">
|
||||
nJFEOCj7o4MSCYfkY3feLLffsFJ27N4nm7a9Izt27ZV33tvtOAitZDVsEzci+/mLr22QFUvPk69+
|
||||
8ROyaM4smTW90e4/hOS9A83JNJdBwDWpC9/oBi3m3Xb8mMTLddeieE5NjdTN1G4Ey77xdh09qors
|
||||
VNjrEY32S0VFhR6sNY3inFBkZKz/tcj0BSLXflp3iPMBouIf/E8tZmJ0FI0zVGJEowTHP+aDXBxo
|
||||
sCC/DsSxUAHjLWjYIBQEN4HBaPGPCbg8EXYOaz+qciJXEtwMECXRiMII8bR5Ijd9XjsPg/LpwMqv
|
||||
QrRO5l4OvA+nDsTZXI1VbKNbvqAri6KYBUbmBno8TmZTd65xI73+T/07DACuUozq9XYWZ7uV4rOj
|
||||
Ob5QzGWaKNst39/AsQ0BvFThLEit8NKPshdOKOdlhMiFRq1fJ+/8G0QW253wrlZnECIxtAMIhx8a
|
||||
2ujU5+P2U9cYw3/bjPS4HE7eSG++yJEWV/H7PvLa4b4WdN/BPQbX/1gRq5fDdY97IjrgfgIWQA6+
|
||||
M7MUshnLc3ks2lkQhTH5CRVwby27XeToe9qtmHkeY1/j/ghRBfdHDCIGNrJNPeVaV4To7X5dux4x
|
||||
COvnjkSoKwY6GudpgQeDtG5EAtYLwhyuQRBlgiItIEIe9AmFjfZmFwAzwTJdcqfIG0/q9C0VznJY
|
||||
cZ2eAFXXvezbogdO8Z1wxF/khPiKwn4YmMY+gWiA5cX2rKrV+wKRNys+HLy+EKIPbBN592UdPjxW
|
||||
x3wQWL+DjkjsFzaaU2Br0d9va5ay4b3X7GPiZ/Y+mq0F43yu5difN/yZ3rcQitAPgJjd3aL7Ajg+
|
||||
IOogsgjHIB7zFSHdNjUitzY/5t9+LcW9o1yOt3Kc11htu4Pb9SAFTDO5BnGSqQTs68/an+p2BlJ9
|
||||
uQMuOK7R90O/FKnC0I/yvY/v167hU4ek7PEL087cbwH7cII6I3X7C8KgciJaugazpUZJtbNR/Wel
|
||||
T/g8RLq01yW9qExSs7U/W1tVKffetkr+4cePyu7d+2TRgnkSRQXqXIRC0tHdo4qwNNTXJudbX1st
|
||||
Ky6+QE2WZUjfQFR6evtlEA1Z5wcjlZUyqa5GbvzY5+WmlZfK+66/QgaOtUhvT2+qaItH8MwmhLrv
|
||||
IBx61qQpsqRpvqzZ/ZZ89Ut/I8svX54WmV1uJ8TsuXPlL/7dF2RaQ73sO3BYvv/df5b2tlYJV4Sl
|
||||
+cgR2bxhg5x5RpNcuWyxs+/cr1rJvzP3t2T+TYoHGvS//552Z6ABWlWX/3fRIFnWpF16xWzsbXzY
|
||||
boQ8XtrQDHRcrvojnVBdVUl8R7t84FxEMZkFl2qxL6gzi+MReXfQwPQLJ4c78a3ndTVJvzB1/P5t
|
||||
/1E3dCFuILcabqhorKNji47Winu1S8Sv0YjtiMqjcGuWY6N+wtzgJsh64rjNNz/bcICbLh+nX7kt
|
||||
46G3tEvLz32Fa8z7/lyHRsJV1dOqE6lDwITDD6IarrHIb5RvlWU0ukMlbla2DyNvpJsvspipN7Jt
|
||||
zwtu0KJL4LX75aGCTDHA4BP2e5AYOdMpZPPW88F5/UbzmjXa1y0cR5j89hk6mwjzhPsKg4j4vCo4
|
||||
YOh2S+Mc7YZEYQyIH/mA4zbo2EXoMu7xEJeDUszgvvz+vxS58qM6bQtEU4DQbAg9WK6gtA04N/Zu
|
||||
8nfPoYMNcRHtN7+BDiwPBjhxPkAsQpsNxVUglj/+t0OPfRT32/SI7vTjvPZz9mI9FtvH+LlX2Pvi
|
||||
sHb3or2B17GeWN98XdMQ55D+B8JvuR/D2C+41qPwS1A00ZDvbtdTOaHcvM/q8wcD8PkMdrngfoQB
|
||||
e0zFAoN3GESHUaIQx7g1Dq6H491lOVbbDsfB9ud1nscmH4c12ksQyOFU3/yoE4XRrq+TMJngmrb8
|
||||
Hvuee6m/gxnHIITIYl2PyrE/kmO/hLMWrcncoZbP++N2y1hOu1A/t1SoRsh5tDw5CPV7EAGxobyv
|
||||
Z7OI1lRXyfHjg6pqtDU4KHdef4X80/2PyC8ff0a+8lefs4/DkH0Njmdp75iqkvS6LdukadZ0aZxU
|
||||
m3UDG4YlNZEKNWXtC9ivd3X1iPRHZcAZdccyV4RDEg6HpQOh4qY5RIjET3X19CQFuki4Qm5fcrls
|
||||
PrRL9u7ZLS1HmqXS/n45inJYokq7I7DSbgBdcM45sv71dfLYL36hCuWAzv5eJSR/+t4P2u+fKbk9
|
||||
w0MPdr3v9URhpMigQfrst/TIJfJuoCNb6nxj2cBNY6fdCFn9fbtRe2R09jMazZjQgRkOGD176Yfa
|
||||
FRq0vK/9St9MF13r3yFBZxqhTZhQJRv5SxKDWsjMpwI5rg0QP99dW9xR5fHujBwrEXEiOCPHcr+U
|
||||
8zKiU3X5PbpT7tf4hUhw53/T4X9w6iFEDUJW0wU6lDeoCEsmCI2DMFqsIhA5Q7ULzBvp5ou0Sniu
|
||||
NJ6p3TjomPiBbbxrvS4AVuxjCPM9vksLyX7XereQDQaf3l1T/udaqWjeqY+NuYv9HXkQP67+uHY4
|
||||
o6MJcRvt6Fnn6m1YaMoAtHUw+a0rnHpwaMEFuPzu4Hsw2lFuu6JQ0GmGOxFOxqACJ2i3YZrSlJ9g
|
||||
5MUtbJM5f/SJ3l6tB6fh8gyatzsvCARNi4a37zHovO4BPQhd7NzWpTiGsV8w+IxCNX6Vx7MJHId3
|
||||
iBzdVX73LLQTX39IF5JCTkecR4UKrcUAjl+kCljzb1oIssbZtY0FbEo3X7Sl3nxGD+z4mWZwT4WZ
|
||||
BBPOOeQ1hdEE17N86xUgH+47L48PV2S2bZevMzJL225COiMtOB8THtEpR1Ga5OdC6UVOlDgFnSuz
|
||||
MIvNRYvOlt+vXafCos+bN1uaZjTKfXffJD9+9En54M3Xy5KFZ6twbRUShQnzsP9GKPXWN3fI48+9
|
||||
KJ+9904JI0R6GMLfkkXnyJa390hbW6dUV1ZKPBaTeCIus6ZNkTObptvL9rpcffkyMVEdGw0A+/dD
|
||||
lRVytPmYvLFjpz6nnKrUsyc3Sl2kRrr6++Xacy+UMxtnSnRI8mPDjRT3HF2Z8fzZP1PM7zVW18mA
|
||||
vd473j0gdQP98pFLV0nvYFSFnD+5fYM0d7TKJYvPg5yccSL4FbCR1OtUIksDRnsf+rJuGMKBAweR
|
||||
OYqNEYzab3lc5LnvaOeMlSj+b0DsREchHCnO/LCtfl9AODlC8V6538lJsiTYIaE6NlW6o5X3RdXS
|
||||
neAXHYG0GA2LiZIvstjbhJeq8SmMFHMZ0bGCsIAqqw0BjhMMQmCgIp9zXDmiwrk7jHCYuSkhcuWW
|
||||
K8YxfLDAvJF++SKLtVwolJHP8sAVqar2luBe4+aihAMD4Y9+nOEUstld4kI2VhmfbxjUe/NZ7RKF
|
||||
qOg3GIpjGtvsjAuDzxPkr1Nt+xxdLAjWaOugTRDzuYcjVyCcWlPn6hzY4crib4OEEzr98o9FdrwY
|
||||
nBMSA7bIqZ2PYJgJBjqqc7h2IQgiMgU5TyH8or1SqsFptMngxITTNd8CUmOd9xT7BWIkHLMYiMkX
|
||||
uEbxvXItxoJB7xf+VV+fr/mEjhDKt2BUUY6FDj0ggzb1e2vzy4k6Gm2Jcs2zOx7aYcXcdm4fEQN8
|
||||
i2/Mb4AW12mIl3JG/v0ntAk2PqSNMafz/vN1RnrzQYqI72vWBDsJfI+fVJi2W536uiuWyQ9+9ah8
|
||||
/8Gn5d9/8m4VNvyR21bJs69ukf/41X+U//y5T8niJedLQ0WFqoiNkPCOeEz27j0g3/j+/XLuvDly
|
||||
z63XDXuZ7r39Rvnsf/1b+fnjq+XuW1dKvWmqHJRWZYUsmDNTfv7QE7Lqikvk4iUXSp29yLjsnmjr
|
||||
lR/f/4Ds2LVPt7lQpdr+Xsy+KLvuwivPOl+umL9IujOq9qrclY54mXCqdLsVrhOOwxSfMRwFMeE0
|
||||
wHN/z3CqZmf5HlJEmfq11Pck+bn+wUGJW3Gpr6iTG89fqg5QBJ+v3/euEiMHB2P++zIjJHuIC5Sd
|
||||
/NKApL+//bLOfXjZB/MLKRop9jmnGtRrf2Y3wn9SvIqm2UDn0K12iGqbI2lgY7QNNyw02gvJcbTx
|
||||
Ed3Au/nzOsynmNvXFSKf+JrdsXtO5+4p5k1svI1Sl8MyTbTtVup9Yo3DZXzlZzo8CPnT8nE2B3WE
|
||||
ka8MnXgkc8/l/oPbC86A/W+MvDiL33bHIFZ3nnkjVfGGgHyRIxXMkKvuguuCHRC477z7in0dbynd
|
||||
MbXjZZHLP6zDHv22jbeQzfE95XUsjybvrNEFptB5DCo8FLjsCR36i7YFXMdTc1Q2R3sAjj6EMyPv
|
||||
oR/IpYg2CsK6MZhYSA68fO7dcNwh5+zWJ3UoehDIKX1st27P+LmuswExElOufQxH0Cs/1cuF9D1T
|
||||
zyiuMIX5wpmM9tOLPxA58u74OoaR1ufAmzrvXD5tOKyvqsS9vbzvYWh/Y3+gfbvq01rwLyR103Dv
|
||||
aXA4b3taZPX/09tpvF7b6Iws7Xx3b7Dvpd/R17sLri88YiToHIUQCcMIrn2l7IuWetv5OSP9XpcJ
|
||||
Xk07FaLtik9DQ7S9W8wSZYcUvzDtxsl18jef/5T8zde/K0dP/khuvuoSmTGtQRadPU9+9Jtn5Mu9
|
||||
vXLdVcvl3HlzZerUKdLb2yfv7DkgT61eIyF7/r/+zv+RuprIsM+8yy48V/7sY3fL9371iLy375Bc
|
||||
cfEiJfJteWePbN6xR2bOXSBf+dYP5KarL5fFC8+SHvv312zYKg/97jmZPLlB+vv6lBBpZjZi8XfI
|
||||
EMvjiAiFTGnr7pLWni6Z0zhdIuGwyr95uP2UWvymKVPVvKKxQTnS1iINNfXSWFuntmFbr/09+7tN
|
||||
U6ZJVYVubBy2PwNxdk7jNCUwIsz8cOtJmVxTK1PrJ4thz7u9r1dOdrbLGY34XqXaV80dbTJoN/Ln
|
||||
2PMKGyFt4LAXIGT/dtLsmuUMsLLua+eYkISwivYoglHbJ79uN8zf0bk35jmhT8UeIUVnEBd7OAHW
|
||||
/UrkjadHljMrn8MDnQ2Igas+pRPho8NaqPvTvWFBPEWjrf1Y4cu6/kG9vKvu00n2ixEWD0cN8qr8
|
||||
7h91BUI4c8ZDZ5andflvN+s02WZjBUIpX/o37cBCQa/hihhI6o9O8NP/V4st93zFPxQZjfUtvxPp
|
||||
bi3dBmpz8kaemUfeSHQ4VVGuEuaLxPZFxeUggQDLDKG2lNdJuOmQRL/pvOBOE5yREJUwIFiqfVXu
|
||||
5zFyI+PeBefLpXdq595w7ovIpXhyv8irv9DCxnV/kluMBBgUhLiI8Fm/jYTjFuHaWM4b/lQ7JHH+
|
||||
jfjeHdU5F9fcr9sn+YYqI4IEA6wLr7LXb15hy4HlDkpjgOMXA5t4XPnHOoQ+6Dv5gO0Il+Drv9Ht
|
||||
qPESCpl5LUPeTaxHPuH4KEQIIVKd32UOjj/sG6QLueqjupJ8Y9PIB9IygQiJ/gYGYJAfHWHicEiP
|
||||
TE4o7+tgvsURrTKcV7ncQzAo5F5HEEmi+nGhER6LMd2WWftznSKgnApMlaodkNMZKZJ/JW3vgTOu
|
||||
q2anwrNVGLaVkTPS87oSplDoRlvwdJEb00iF8hrifD4VubTy0gvlsx+9S7741W9LS6+lcjXW1tTI
|
||||
tGnT5NLFC6Wvp1u+9I//ImfMnSszZ86QqVOnSU19gyyYUS9zZ00bUXVDLObnPvZBeWPHe/LbFzbK
|
||||
rhPapRSNDkrzyVb5yle+Js8+86z8w3d+KMuWLZVJk+qlsXGazJgxQw4dOaJ+O2SfYMp96Jlvjb38
|
||||
kybZDYKKlH5dUxmRh7a+Ji/t3CZ/9f4/lBVnLZKTXe3yo9eeV9vlS3d+XGY3TJV1u3fIt1Y/Ltcs
|
||||
XCJ/ev0H1Cb73Y6N8tz2TfL5G++SVYsukpauLvnphhelN9ovX73nPpk1uVG27N8l//zSE7Li7Avk
|
||||
czfeIRX2cj219jl5atsG+Ysb71Tfa7O35c9eeEQ6errkf9z9SZk1qUEG4toB6VYcD4VCnt1ueTav
|
||||
NWRfZ4Zq65D8hFPciHpFSUGewvW/Fdn+e53v7NI7dKhw3TTdkR5J4xs3EHSO4e7Z8oQeGYcoWYpQ
|
||||
uVyd58f/Xjcml31AJ2qvyDNsGx1XOBEgnq57UM9juECQhBB77X0i51+rOxPozBe6beGQhjiKeeEm
|
||||
ijwnhTgiS5WjsBxDAgtplFncbmUnYOTaluNhGRF2ibDqlR8XOWeFdsvlW2AGKSDg8EIeQtc5AnGr
|
||||
tVlfv3I1xDHQgTx3uGZlC/8t1jGM5VmUR97IgwH5IgvdxpnzUQVObsmvAMOOl7RIXMpjJzaoHern
|
||||
rQwWLNxCNm+WSSGbsRr/PfS2yAvf04IIQvEgIuYr3iN1AfbpgTdEXv2lLiIEsP0h+OVKzwJXJIrT
|
||||
RJ4LFqfhWNz2rG6zIIQZ9258fzgOHSwv2kI4L5BPetszWujM+/txXdQBhUeQ5w/h2vkOGmObVmKZ
|
||||
Tf+2F1zM2JbIdXjNH2vhE4PTcFWaBXpoIBJ3nBDZv1WLXchN2dc5fo9hpKjAvoN4HtRugwgJF3kp
|
||||
B2KKCY5z5BzHgA0Enys+rO8nk2foY30k4g/uRbjGnTxgX4ftY2DDw1rwLlVY9mi2s+iMHJ3PvrfO
|
||||
bhPZ/Z4VHxK55A7dDqquLzzPKe4LuGdAEEdfdNOj48sRmWvb0Rk5jG2YFo6bei3zMa1Sdtrr6fNy
|
||||
Q4e1IGjIheeeJbNnzZS//OJfyvRZs6XSvob++ec+Lzdfs0I+cN0KefrF1+T2D9wmd//Bh2T+nJny
|
||||
T//yPXlrwytFO0LOnjdbll60RL7y5S9JqLJKWltOyL0f+ajse2aNnBnT4uhnPvMZuWTZJTJrxlT5
|
||||
5H2flt27dQW+sCNGelfSMHXotuk4ugxnPd84tEf2txyXA/a06ryLVH7Jzfvfk3BFhbT0dMqipnmy
|
||||
5+RR2ddyTOqqq9X3Efr95qG96nt77fduu/gK9fjWkf3S1dcr7X09smD6bDnU1qK+W19doxyTlZEK
|
||||
2WZ/78Cp47LreLPcvOQy6RsclM0HdklftF+Od7bJ/OkzJdbvCQV3llNvlWxVtK0sx4awgvZYglFL
|
||||
5B6EWwEdg4VX252qBTppN0LL0LiPVOtCCdkaY2h0oCM90Ks7oZgwmo9ONRre6GCPZgPEK0g+9ne6
|
||||
YiXEAQityQZ2aKgQgA4DQor2btZhWnC7FCO314FtIg/8tcjFt+o8nbPO1rnelGuhPnvHAuIjOjFo
|
||||
1GL/ICfd5ie02DGchj1uxHBb+d2AVdXSAtcXywfBOYhihpIXY13h3Mon/+dE2W75Lk+p8duW42EZ
|
||||
AYQDXHOuvFfk0g9qEQHXHAgDaiDCHNph6zqlHd0bHtLiinuO432c88gp6yeE4LoGR0u25crnGIbz
|
||||
Ox7zX+99m/W1cWpAXiaE4QblS+vr0gMrEC4KPUexDeEyh6MnKF8uEuF3tpT+mEDS/V2v5+d2g5sT
|
||||
glKpxEhcMyCKVE8e2XFcaiDwPPS/dBg9nFlzztf3RISLYuDQK4LB0YJjBueJyjv5nB7k9LrtcGzi
|
||||
2PPLI4ptD4dNvk5ZiDRH7ON04ZW6QwxnJXK01k3JLdagHYvjGucwiiZhGbGOWN5j7w2/LfPij7TI
|
||||
p1LrnK3bZ6gcm9kxx2/jOMT2wrmP72JAJGhfo42GcxxFhubZ67nkJpGzLtMDqLUNTkXyyND2CtYX
|
||||
bT+c864D7o1ndOX4kXT4248Hh/KOxjGsBMYdIhfd4i+YQ+yFcHno7fHXB8DxgvY/3Gjn2sf6hTdo
|
||||
pzdc/tj3ONbd/Z9NCML5qfoAffq4w7UNuU5xPOF+BoG2WPsJwnlQOwD7rNC2E5YPx1PQMdd+vDjL
|
||||
qNKwdJbfvMZ622UC9/sz39a5HS+7S0dnqH5cg3OvyGLugPg46Ezoj+K6i3sG8hWPR4f2SITLgqpp
|
||||
+1XR9n7eS76vlYkQmXqeHpqdWV07ezVt8bxuOgVVkL/QTK5wXW21DESjMv/MebL44qXSeuKo+kx/
|
||||
f79y2U1rnCxnNM2WpUsvFquvWwYGBlSIs1Ukp9bUKQ0Sje6UCxcttNsp02Trpo32vT1h9y/2igmH
|
||||
Zywu5513rlxs/37nqRMSHRiUCjHFXgoV3myIkVW8Ta6/I76annDThCPSQvwLJXM9pvJpQhxMCbYp
|
||||
UVMcN6orHqr8314hVLk0dY7IpMCIfZHQHw458zcyRWZfO0v6Qa33pZU8HtJfZzXtMQEOnDU/1dOk
|
||||
6TpX2YJL9HM4FyBMovGNBjD2/mCfLgqFBiganSpx93YtvsFNOBYCpGRcQ+Fa2Wo3st55RY/4XnST
|
||||
DkNCoxJ5xzAyjE44cuegqh8ch7hZ5yNUFST22B2EjY/qado83eDD9m1aqDuN7nbF72J50NFGZwbb
|
||||
FJ0shAqNRJiCKAyBxM9Rgf3Ydaqw8w439of/d7CrBaHlo3U+57Ou6IxCgLG43QpanlLjty3HwzK6
|
||||
IL/i89+zO2Iv6nxc8xbrHHloRKNzh+sSzvOeVu0gggMg1zmOXJRIbxF0DGIAwxrmMYzrX5CbEddH
|
||||
3COCtj/EDyyP37zQYf/NV/zd6rnOUdxXnv5WfscBzp/RcCkh//IjX3XCeQNcG1geFSVQomWBqP3k
|
||||
N4NdfMO5bpVCBMEgG+5xSBVzxgXaPQpnVu1U3clExxLHAvINQizBZ7OJXBAiIaT5HRdIgVCoUxb7
|
||||
663V2kEGdyQEurMu1S5Y/BbOKxzHOC4hBLqpaSDAQMxEB72rCII4OvKrfyiy/QXtAj3ncr086JBD
|
||||
uI0PpH4bbRhsLxTdK7TzjX2C6xEmCLf4nfnL9LXLFQEgAKCNjvXFeuM33MEK/HYxqmUj9cSejWN/
|
||||
DKsBow59HPgdW/gMQrSDqqOXM7hvob2MCdey2Xb7dP5SfZzBiY7zEoI+DArhCqe9at/Lot1aKMe1
|
||||
H8cdXMulEopxTgW1A3CeF3qNxXH08v0imx4PuNbnMd98ljHf+8Boz2ust1020E/zXpNwDcZx6RpM
|
||||
MNjipo/BNQnXePwWrru4/ua6Z4xXgTFbf1d8HrN8d2I6Iy2lnKU7IS1xwnVT0QOWE34NHdJyowoM
|
||||
57uG5bzh0Wft1+EgBLWRSiVGtraeUjflaFSLCe5vNE6ul+aj9sEY65PBeHzoThwhTTOmycmWU3Kq
|
||||
tU3qJk2SwcFB1ZAKV0WU0xC5IVtO2BfrxIAqcKMKyDgCZNgpEhOg6Kr5oeL2jEkN0lg3SeL2BkIF
|
||||
b4Rmh0Mhqa+qkQG7kzPVfg+faWrQjTnMGd+bXt+g3huwGw/VlVUy0/5MbSSiwr/hnmyorVPfmzG5
|
||||
Qed/xPca9PemT5qsitpE7BsQfq+nv0+tV9xPcMpmGfbs91T0vqX3cSJYzSejBC7mnS9rcS4TlbfI
|
||||
1GHDsej4WB9V9fQVPbmgMwE3JES/0QofB2istmQ4etChQYMPN89SLI/anyeLvy4QWcvBrVaqdZ0o
|
||||
260c9+N4XMYhwtBOPW18OP36iY4rrkn5nOcjPQaLdQwXc/v3O87I0+k4aDlYJve67uKIQaOJKkLT
|
||||
rIVzFzdHWL4Dm7hvljIcGMIKjjtMCLd2gWiDDrHbiS/1ICycQpiQ90x1fqZoQRJCUrTI+x3b3hWn
|
||||
kr3YSr2+2B6l7OBj3uUgIKDI0syztLvdD4hwGGQZq0H4YoNzCYNYmDLBuYlBdEQSlfKcG83z3HX3
|
||||
FcM5V8xlLNd5lWrb5XNNwn3Ce6/AfcJ1xI/GNXi86G6SW08ZWk3bkzIvpzOy3PMnBW0Tz8p5nZBw
|
||||
OWpnXCLNEQdXZHIS1yHpvq4dkTpU23TCgC2prq6QqqoK6ersTLPsus69KZPrpd1+Lz4Q87gQi6dG
|
||||
NjbUS6c9/2h0IO336yojMrPWvmjHEtLR0a7fc95OiO6IQEg08jiuUG377ktXyvlN82TxGfOldyAq
|
||||
k6pr5HM33KHeh5DY0dsjS+edLZ+85mZZOHOuEgshdN6x9EpZMG2WLJt/rnT29cqkmlq5z/4MhFkI
|
||||
lD3Rfjl/9jy5b+UtsmDGbN2mHRiQDyxdIXMbp8vyBQvt3+uXmkhEPrvq/fbzAVXAJgrRNfeOFydA
|
||||
O7kWluc11x2ZVsjG6wilIFmelEunK+iECTp+yinJOJxSx3bz2CKE109CyGiE1xcDhD72do7d70OE
|
||||
7G4bvd+DS6llgoQ6or+GsFAUnvLLnwjxA65IhHNbE+TcHC/nJzn9gcGstfn0X8/hOCNzGLzCVp6/
|
||||
kzYvr3g5TreglRGarSaIkGkhu85zMdIFSccqmRIknVDiZKi2SKSyQhom1UlrW7v6jYygZ2mcPEna
|
||||
29skFotJKC2hfHHuHPXVVdLX3ydRexKnGA2WYVKkRqbW1kt1qEJOtXWosNbMkOxwlgI22YDrETkh
|
||||
l8xboITCgcFBVYX7xgsvUb8GsTCeiCmB8kPLr5XBWFxV1sY6njtrjlxwxjz1vUH7Nfze9ecvU8vS
|
||||
NxhV866rqpZ7ll+jRM9+iIz2hj57RpMSP6P4PXvb4XsojINwcYihCcsK2O+JtFyh3n2dEiAdETpD
|
||||
lCaEEEIIIYSQUQUOwAuv1zk0/YBbC67I0yEUlBAybrHcf/J1RibdkBkmPa9L0vLOxDuzcVZZO+ly
|
||||
c0J0k8Jjwvk7+ahzIKaHausQbZXF0MBndD5DyzSSodoqHNoMyaS6Omnt6FAhv+521ZWZLZk1faps
|
||||
ePewEvBqK1K5k4qledVUR6QyHJLurq40ZyRCmxGOXRep1ssWS9mHE85OdKtpB4E1hmg4ENNuRFfA
|
||||
hCiY/Iz9GkKue/r7k4eKZPke6Mv4HnJEwiGZ/J792mAspqa07w0OFHxSuEVqks/d40DtH+/+Tz3S
|
||||
GUmGfTW2hMcPIYQQQggpDBQXRAXfi98fnJ/2wJt6YpuTEFLKvm2u/q54ikOLR0fM0R8OZ86vkALC
|
||||
md+zjPFjmEy5Hs3Uc3tKqKIrCTWpvyFOmemuSFWURb3vbmTtjoTH0HTCuFEkZnJttbS3tnmSTbpF
|
||||
VSyZOW2qtJx6UwYGY1JnpBTgYjnwqqsiqohOa0dn2k7Br6MIDBySrada1Pqlqk07B4VpimGchueN
|
||||
uz8td1tbTtEdy3lP73sroV/HZ70TIYQQQgghhBQN5OnGhAIscSfdFCLtUAijfqrIBatEVn1SZPa5
|
||||
/vNBJXEUBmneyW1KCBlbrKFPs3lzwnl83/f98Ri2rUQn00o54vB3QjwClOm8lvobn1d/wxVpOLIj
|
||||
nJGm/q4KxdY6ZFLImzl9qhw/fkxtIMP72/bfyOnY3tGhnJFJMTCjivRIqKwIyySIkW26iqR396AY
|
||||
TENNnRw7flzi8URygd1fDuEGeBqOqKntm1G4yBUjEwlJuiBT+91K7i88ElLwhZIQQgghhJBcNM7R
|
||||
4dcwpLhFPUKVIg0zRRZeKbLkRpGpc4Pnc/gdkV3rR7+QCyGEZOsWW8F95nDyTU8hG2XOM90wVmNo
|
||||
0kmfsOzx4JBMyweZzP0IR5ypw3Qt1w0XtyfDeW4/2iuVMFI1X1zRES5IiFnIl5hQ4do6ZLu+tlqO
|
||||
trXZ2yMkhmk6m0//bmXYlO6eboljBCzkhkUXzxkZDpkqVBxFbFRAtekGSGsxsipcoYTKhLP8qki4
|
||||
812EacM9aTr5L9U62s/xvZBpjpsTwl1+V+zFQ6pITeo4SHhCtBOJVI5I7H99XDjOSApPZCRXZx4/
|
||||
hBBCCCHECxyPt/+lyNnLhz+PgT6R3RtEDmxne5MQUvp+beafGWkdrVx2yGxh2vn81nDcj+53yk2Y
|
||||
1AKToYTIpNCYDMdNf255X1dipKHELa8YCZekmAjtdsK07Te6e/tlx+79cvHyq8UyjGSRGMPeKHAj
|
||||
bntnjzTNmi1VkSqJewuqFMGBB3Ht4JHj0tbZLTOmTxdvhDFcfl3RPjnS1SrL514iobB9CAxG074P
|
||||
DW8wHlOFY1xQBRuvufkaxwOmEXK2q96mg4OD0tHV42jpllTY647JckOznX2NHJfYRwzTJoQQQggh
|
||||
hJSMvm6R/u6RdGxFDr8tsv0FkbZmbk9CyKiRVlMm47m35IaVw5gTdgVDb6GabEVsXDekW9BFAiJ5
|
||||
yzly290wOh+kmSxmkgzJTgpQZtIhiUrtWoBMiZFJ+df+rmmZ2hlpaRfiq1veltfffFfuvOejsnHT
|
||||
Rmk9eUq6e3uloqJCWtu75BePPy9Xrlwlu/fsFWvXbjly5IiaYzFEr31Hjsv//clvZMBeJvzexg3r
|
||||
ZcfbO1Tl7s3Ne2T7iUOyp+uk/NHZZ8vmrVulq7NTOjpTlv7m9lPy1NubpKOvJ1kcZt3uHXKyq0NV
|
||||
vx4vuI7IU1163V54dZMcPnIsqSKf2TRLrrrkAu0KTTj5QtNESC0cx+mMJL5X4BJ8lhBCCCGEnP70
|
||||
dOhpuJ3a9qMiGx8X2bWBbU1CyOj3f60s1bM9kddWZmFsD2H3Opas9ixZKmtn+d2kiCnBEYjue+Ui
|
||||
ThquM9JIuSAt5XxMhWTH43ElLpqG1wGZCtF2SsE428LQ7jpTC5KG/ffmt3fKWXObZPULz8qal56X
|
||||
wcGYVIcNmT29UQ40n1Ai4dHD++WfvvkN9VvHT5yQyy48WwmfI+XZtZvk0edfkckNDfK1r39d7aye
|
||||
3j614M/v364+E6mKyEO/fUgef+wxicXicvDgQamt1hXa9reflB+uf06vs70+eP3J7fYNbvt4PWP0
|
||||
OvzysedSr9jrdcniRXLpheeqkHkIkfEMZ6R+jKupGPuFTOALNhuHhBBCCCEkk97O4ed57Dwp8vLP
|
||||
RdY+INLdxm1JCCl9v1ZyF9TO+p6Vuyuc2xmZUYLbm0fSLcdiDYkXt1Q4sp8rEt8xjLEVKCMSl2hC
|
||||
53fUodo6N6ShxEjtgjQdYVI5Ih0BEnJUmhjp5p403Erb+jnmc/eNV8vlS85TocGGk4ezpnaVnDNv
|
||||
tgp1/t9f/LT09PQmd5dphuS8s+Yp4WukLDyzSe686Rrpj0YlHnPmN7laFp05K+1zAx6X4/yZF4kZ
|
||||
CiX3o7uDUsV1xrGa4jhZ3RyRevUsWWRv71DIUCHoygWZQ4xMOBMFJVKMizchhBBCCCGKvi49FQJq
|
||||
DkCIXP+wyJpfiLQyPJsQUvourJWtSrZHhbQci6TXDel9L1NTyu2MzHA9WlneExnqjMyM8hYr5SxM
|
||||
6pye18aCqsSA9JuhVJ5AQ4fpQoCM23+YyhmJv+NafIw7RWnwfjyV/zElRpqOGOkU77H/nj97uiyY
|
||||
MzNVPMXz+5FwRFYsXph12RAmPFKuXnq+cvzF4onxVOS8xKdO6gRwq2jXVkWSQqQrQrqipBYj42p/
|
||||
qIrjSAxNSCadLSIth+wraST3Z3raReJ01hJCCCGEkAyQLxLuSDhXjIBCoeiPdJwUObZLZOuzIpue
|
||||
EDl1iNuQEFJyrCyuyORr1lBx0lvMZkhuSYchzkhcBy3T87dnBt4Zqb8dt6R3YVzXY6bgmMstORZm
|
||||
oRqJSptVJQnLLUxjKAFK5YKMi8RRXdp+YuKe4DojDee5aIekVnwNCUHcMrUnVIlcBlyV9iNq2th/
|
||||
p4mRbrj3KCixFSHDnkIT/IRJl+4t8VZSt5RrFXvO64rMFCPVc/sx0tcpFp1tJJMNj4qxZ7N9suc+
|
||||
16xDb9sNzR46IwkhhBBCSDoYsD66S4y3XhKpnmR34irtKWJPVfZ7MZHBfl1sFI8th8Ta/qLIW6vp
|
||||
hiSEjC45YrO9QuMQ7dCbM1Ic/dBDmjMyGXIs6cpm0g3peRQjXaBUk5EK1U5b3jIIzfYyOdYjh8L1
|
||||
YlpOjkgVku3kjIRLEnkCRYdqe/NEupOlhEhTQiFTbTMlSGJe9n+miXkk9LwlkSx4I+LJPWkYPJhH
|
||||
43zxqIdeV6TlWIdVrlA8Wok0d6RbSdsbql3Z38kNSobyzlpqjIQQQgghZPi8+qBY6x6yO6kzRSZN
|
||||
0xOewzXZcdyeToi0H2ekFiFkbLCG/pnmhhRPiLaki5LJrwc5I91ckd7KN8mZeR2TkiFCytAFsQx/
|
||||
0THTZTaa+tzM3uOyvW6mEg1VJeVk+LUkw7Kx9Olh2s76KPHRtLdFSAuRcEWaCOk2tRCJMG88xp2q
|
||||
24ZkFSST6y0UJot7nlhZj7OUI1KSgiQESF1F3UoKkimHZEIV9sHfsXhc6tsO0dlGCCGEEEIIKT5w
|
||||
SMLtSMcjIaQMyBYVamX5w1tJ2xuaLRlVtH2raac5H8UrKjohyJmvWYbHDZmajMwV8CSQLBd35Jnt
|
||||
e8WsuVC5ICEGKlEy6Yo0dKEa3BMMT8EaZ8u5QlbIFbXsJ3BBIjQbj5iPmTCSAmRSjMxY4yGiJN2S
|
||||
IzxZLN+TR7shrSF5I4eIkfGEqoyOR4iQyhkZi8mUY+9yIxNCCCGEEEIIIeT0xUeITDMlegTJNMNi
|
||||
FhES2ktmdZTwkDyQuXJHSg5XZMIRJM2hAmZyyTIEycz1M0RGNR9fTX+HzJQeaU7UOWKkFgNd8VEt
|
||||
k5Mj0l1Y5IVUT01H1DLhijTt9bfnYD8ipNs0tTtSF7sRjyDpui4zBUkKkCU7f9JCtK0h+SItp6J2
|
||||
mjvSU1XbW1G7qrdd6tqODslxQAghhBBCCCGEEHK6YWX5O1OMTD230vTChDMFOiOTM/bMVLkErYwK
|
||||
2s6jmrEgp2LGdzAjN9Tb/sc0sguSyUIuOVZyNDi/Z680K3ckitd4lsl9jLsiqV5D5IhUjypPpKke
|
||||
EwktPqrJCdNGJW3MA8Jk0hkpboh2SnykDjkKJ48nRFsfZylnpJsvEi5IN3+kW007YXnzRSak6dh2
|
||||
MRKDjNImhBBCCCGEEELIhCGXKzLwuWiN0NUKhzojZajgmMgUHrO8bolbtMVT3CUj1BtWzJyCpMiY
|
||||
xmovPLlDts1bIC1WvRKikuJg3Jv/UhenUc9D2kEXt0IqT2RIrVsi6YZMPndCvk0zkRaenVm8hq7I
|
||||
UThpPKH1qZMn5YTMGartcUVGop3SdHAzhUhCCCGEEEIIIYRMHLIUqsk0JKYiUdPdkAlPiHZWZ6T9
|
||||
Qsx+LZxSLD35IN0vQ6hRxVns56akHkXriaazMAn3BUd4NI2heSPTk0qO3TY1E4OytGu3vDhpicTN
|
||||
kBiOTGt4NzqmkPunlaqcDVek5YZmp1yRphOunVm4xp1nphBJPbKE50ymK3JIqLZkiJEJncfAU8RG
|
||||
4jGZd3S7VPT3UIwkhBBCCCGEEELIhCFNiJRsLkgrGTmdsIaGaCcyJi9hU6zOhGU0mm5YtscNmfyS
|
||||
5Hq0lPMvnizy4miRhjfc2/6MJak8ipbHRSljW8jm7FM7ZX/VLNldOUOcutqere4pA+QKVkZCEqGQ
|
||||
hOyVN1UFbSdfZLKadsIjRqZESRGPAJnDKUmKdLKk5Yp0TxyPIGml545Md0a64dv2Yzwu07qOyNyD
|
||||
myhEEkIIIYQQQgghZMLgFSJliCPSSnNHerXDZIi2W7gmJUamSSthw5Au+8VGb1j2EEHScGaC/IpO
|
||||
ERrDSlWaxnNV68UjSor7t6Udknrp9QcsSYmSyY+OgSYHd+RVJzZIz4yrpblyimiPp7PBnarZqSrM
|
||||
lnI/KqEqGZrtKVrjPHdzRLrP9brlL0RSnCzwBMlR+cgbmq2PPivtNSU6enJIWp4wbUz1fafkgt0v
|
||||
SXiwn2IkIYQQQgghhBBCTnu8Ekv2EG3LE6KdYVh0pvhQV2Tcnvq9vxMOiXHS/uCZ3pmrfHrZwrK9
|
||||
j4b+wTicj8mwZEklkHRsj65TUoVzOy5JEY9TMmOFR1uKq+nrlGtObZXVUy+RU5UNSlRV28FM5bp0
|
||||
BSvTMpxHe0o4Idr2cyPuPPfkiTRM7bLMDMnOVszGC8XIQk8UK/C9lDMy9bqVsNLyRybcv+2DG9XW
|
||||
Lzi0Xmo7T1CIJIQQQgghhBBCyGmNleNvb0h2mm7oefSKkfHkY5orcjCRsDq880eY9tEECtF4qmTj
|
||||
edxxNHofDSUmGskwa9fdGPcEXCcX2MkXaXo+J+J5TAqTRsbro7/RG7qPyXWyRdY1LJZj1dMkboRT
|
||||
65MM64XwaCrRKhWW7QiQYqTckU41bTdM27tOmQ7JfJRXipMZJ4RlFXQmeUO00/eno+ZbiaQgadjP
|
||||
J/e2yPlHt8rUk7uGVHsihBBCCCGEEEIIOd3IlFpcF6T7XmbeyDQx0hUgRWuH8YSVFCYdcTIqhrR4
|
||||
5x8OGcYhHbaaKkqjn8NHCZENMxIxTIQ1O45Ixx0ZT3ND6rhty/k7mTcy6YrUC21k5Jc0nJUzPEVu
|
||||
xkJ+a+g8Ktf3dcrrsy+T5sg06ZVqpeRahlPQxzJ1KK9yQxra5em4I9PFyERaARu9bhkipOQnulKI
|
||||
zHWSWAWdTEMFyfQCNvgjEuuThu6TsvjweqnrOkkhkhBCCCGEEEIIIac9OV2ROapp53JFus/jQ6eo
|
||||
/YWT3t8IhwxrjSXG56ABep2R7vO4Iw6ajsrpdUeKOKKalVpky+OQdCdP1PaQaUieSRm7ojYVAz1y
|
||||
5aHXZN+MRbI7Mks6qhqk34zY622Km1PTUO5ILGPCI0C6OSONdEekZHdIJtczQGw0hGJk9hMlWIzM
|
||||
WsjGdUR6PhOJR6W2r13mdB2WeUfelHCsX+c/JYQQQgghhBBCCJkAZBMkUwav3GIkHuOeEG1ETiuH
|
||||
pOd1+3udplhve+cfnlw/8Fjrqcgp+wPTTCOjYrYB55iRJkrqkGsrJZThTVMywmKN/MRI541MyW1M
|
||||
Jbj4oCw4ul3OCL8ne2YuliPhKdJXUS0DFbUSM0P29girytsGcmpa2gGZDM1OOGKkux45Kmrngk7I
|
||||
YZ40AU5Jy3MGhRMxCVtxVZgmEu2WmdFWmXvsLYn06fQFFCIJIYQQQgghhBAykQhyRwY5I90cka4I
|
||||
GUskXZGI2j5kVFqveOev1K+fLa/8TU/c+FDI/gtT2FDJJJ3nhnoedl4POe+Fnffc73gniJrQJ1W1
|
||||
6eTzlACZJkZKmYmRWeiony3HJjdJd6hWes1KGZBQKi8kltfUVbjdStruc8lRRTsXlCKLc9IMed/5
|
||||
QNg+Leri/VI90CsN3SdkStshMRKD3ICEEEIIIYQQQgiZsGQVI7MIkdnEyKQQ6fztCpEx7Yxst8S6
|
||||
f+WL/V/wzh+VWiQSkt92x6y7EmKEXXek64RMwGJppQcMpwrS6LBsK2OBQ+Ip+e28j3l6UkumPRdJ
|
||||
F+LKTZSr6zwq59gTOb1gXkhCCCGEEEIIIYRMdDJ1vTQhUjxiZJo70koKkGmh2o4QifozCbFOhC1r
|
||||
TebvKTESododA5W74yKLElbKuahzROpfM7xVr93Q7ORipvJEmp5HNdnfTVbUdp2EGYKkSG6XZC7o
|
||||
IiSEEEIIIYQQQgghJDdWIZ/LKFqjHh3B0bJc02H24jVDC9dYUfs774SrjNWZv6XEyFuel/4HVhj/
|
||||
3DZgfSMhRlUy56EzM/08JToqHEHS64DEAoScStrJ8GzRgqMWJK0MV6Qnv2KGKJkJxUdCCCGEEEII
|
||||
IYQQQoaPlePFISKk8w8ipv3CtJMh2smckZZyRdp/NocM69HLn4u2Z/5c2H0yZVL0xz1tlfcOJORa
|
||||
91fjnkrZKe9j6hl+1TLcSTsglSgpOlTbdByQpqTclqY3NNuy0vJI6teybywWdyGEEEIIIYQQQggh
|
||||
ZJhYbuHprG+lHrOEaafckbmdkQnLcoXJPvvppnDEeCzbb6UpfA9eEbnj1ID1U/vlBm8xmqFFaoxk
|
||||
MZtshWtCbuEaV4j0PHqL13gra4vQFUkIIYQQQgghhBBCSCkpxB2ZTYj0q6ZtP8btD7xVYVh/c92a
|
||||
6JPZfirs/ePD66NP/Nvyyp/3xaw/SYhR7b5uZF1sI00ldUO0dQEcSzkgtSBpeMK0PUKkJYGuSJoh
|
||||
CSGEEEIIIYQQQggpMlaOKtqSvZp2dmekleGMVB85FjLkkbpJ0edz/XQ484XZDQP/6XBrZE40Yd0h
|
||||
YoQzF0jcHzfsXzAMFaKdcJyRKo2klXJT6nyRngI2YiSfA68o6f7txbB4bBBCCCGEEEIIIYQQUmys
|
||||
jD+yFa8ZKkhaSUekK0zGU+HabaZhPVJVaXz7st/JQK7fzeo9fOjKigUtUeNn9syuNAzD9IZr6zDs
|
||||
9BBuuB/d97zh2e5zb2i2G67t/jgEylyVtGmMJIQQQgghhBBCCCGk+GR1RnoESPEpXpN0SKbCtbsM
|
||||
kadqw/Lfrn+5f6/f7+bU+357VWTZiT7r2/bTyw3DqMwUI80MgdINyc4mSHrDsU1vWLb7aKTnjhTP
|
||||
64QQQgghhBBCCCGEkOJhZSiRQyKiZahDMpH90bIfOwxDnq0Nyz+87+X+LUG/7Sv3wSF5asD4VtyS
|
||||
6+2P1mU6HnO7IY2chWvSxMg0h6RnoYwRLDQhhBBCCCGEEEIIIURhBbxpZfmsZQ0tZJNFiIQp8oQh
|
||||
1qO1YePrN67xd0S6BOp6j62ondISj31tMGHdaokx2/5COFsottcFmXrNyOmINHI4JPNZKIqRhBBC
|
||||
CCGEEEIIIYT4YxXwvtctmXRGZuSN9AiRUVOsvaZhPFwXkW+878X+tnyXKW9d75crIn/eOWD9gWHI
|
||||
YkuMmYauRZM1P6SZ0xGpfy4zLJs5IwkhhBBCCCGEEEIIGT1yVdOWHGHals4hOWCINBuGvBExjV9V
|
||||
1PQ/esvzuYvVZKMgvQ8uyR4r/iedg4nbTNM4x16A6fYcqjJDsnOFaAeJkEbG4jBnJCGEEEIIIYQQ
|
||||
QgghRcbKJkamXskI00Y4dq/95KQY8m61KQ/VRoyHC3FDehmW3AdRsk9i93bHrFUxy1hgGjLLnlGj
|
||||
ky6yWiTlmswWkj2SEO2irQQhhBBCCCGEEEIIIacZ1ki+o0XImCHSb/8Vt59DcDwSMqx3I4b5ckUo
|
||||
8fxtaweOjWT5RqzjPXZNZHb/QOLW/rh5hT23ioG41WQZRiRNjJTsxWqGOiOHuRJUIwkhhBBCCCGE
|
||||
EEIIGVIpO+/vOQ+GSG+FKcfsx54Kw9hSKcaLt63rO8gtSwghhBBCCCGEEEIIIYQQQgghhBBCCCGE
|
||||
EEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBC
|
||||
CCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQggh
|
||||
hBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEELy5P8L
|
||||
MAChisOdlmRgOQAAAABJRU5ErkJggg==" transform="matrix(0.1433 0 0 0.1433 2.884035e-02 -0.2508)">
|
||||
</image>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 58 KiB |
@@ -173,7 +173,7 @@ An additional column `audios` is required. Please refer to the [sharegpt](#share

Compared to the alpaca format, the sharegpt format allows datasets to have **more roles**, such as human, gpt, observation and function. They are presented as a list of objects in the `conversations` column.

Note that the human and observation turns should appear in odd positions, while gpt and function turns should appear in even positions.
Note that the human and observation turns should appear in odd positions, while gpt and function turns should appear in even positions. The gpt and function turns will be learned by the model.

```json
[

@@ -172,7 +172,7 @@ KTO 数据集需要提供额外的 `kto_tag` 列。详情请参阅 [sharegpt](#s

相比 alpaca 格式的数据集,sharegpt 格式支持**更多的角色种类**,例如 human、gpt、observation、function 等等。它们构成一个对象列表呈现在 `conversations` 列中。

注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。
注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。默认所有的 gpt 和 function 会被用于学习。

```json
[
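
To make the position rule concrete, here is a minimal hand-written sharegpt record (illustrative only: the question/answer text is invented, but the `conversations`/`from`/`value` keys follow the format described above):

```json
[
  {
    "conversations": [
      {"from": "human", "value": "What is the capital of France?"},
      {"from": "gpt", "value": "The capital of France is Paris."}
    ]
  }
]
```

Here the human turn occupies an odd (1st) position and the gpt turn an even (2nd) position, so only the gpt turn contributes to the training loss.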
111
docker/docker-cuda/README.md
Normal file
@@ -0,0 +1,111 @@
# Docker Setup for NVIDIA GPUs

This directory contains Docker configuration files for running LLaMA Factory with NVIDIA GPU support.

## Prerequisites

### Linux-specific Requirements

Before running the Docker container with GPU support, you need to install the following packages:

1. **Docker**: The container runtime

   ```bash
   # Ubuntu/Debian
   sudo apt-get update
   sudo apt-get install docker.io

   # Or install Docker Engine from the official repository:
   # https://docs.docker.com/engine/install/
   ```

2. **Docker Compose** (if using the docker-compose method):

   ```bash
   # Ubuntu/Debian
   sudo apt-get install docker-compose

   # Or install the latest version:
   # https://docs.docker.com/compose/install/
   ```

3. **NVIDIA Container Toolkit** (required for GPU support):

   ```bash
   # Add the NVIDIA GPG key and repository
   distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
   curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
   curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list

   # Install nvidia-container-toolkit
   sudo apt-get update
   sudo apt-get install -y nvidia-container-toolkit

   # Restart Docker to apply changes
   sudo systemctl restart docker
   ```

**Note**: Without `nvidia-container-toolkit`, the Docker container will not be able to access your NVIDIA GPU.

### Verify GPU Access

After installation, verify that Docker can access your GPU:

```bash
sudo docker run --rm --gpus all nvidia/cuda:12.4.0-base-ubuntu22.04 nvidia-smi
```

If successful, you should see your GPU information displayed.

## Usage

### Using Docker Compose (Recommended)

```bash
cd docker/docker-cuda/
docker compose up -d
docker compose exec llamafactory bash
```
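
For orientation, the compose service behind these commands looks roughly like the sketch below. This is a simplified, hypothetical rendering — the `docker-compose.yml` shipped in this directory is authoritative, and the build context, ports, and GPU reservation shown here are assumptions:

```yaml
services:
  llamafactory:
    build:
      context: ../..                       # repository root
      dockerfile: ./docker/docker-cuda/Dockerfile
    container_name: llamafactory
    ipc: host
    ports:
      - "7860:7860"                        # LLaMA Board web UI
      - "8000:8000"                        # OpenAI-style API server
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    tty: true
    stdin_open: true
    command: bash
```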

### Using Docker Run

```bash
# Build the image
docker build -f ./docker/docker-cuda/Dockerfile \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=metrics \
    -t llamafactory:latest .

# Run the container
docker run -dit --ipc=host --gpus=all \
    -p 7860:7860 \
    -p 8000:8000 \
    --name llamafactory \
    llamafactory:latest

# Enter the container
docker exec -it llamafactory bash
```
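
One optional tweak worth knowing (not part of the README above, so treat the exact paths as assumptions): mount your Hugging Face cache into the container so model weights are not re-downloaded on every run:

```bash
docker run -dit --ipc=host --gpus=all \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    -p 7860:7860 \
    -p 8000:8000 \
    --name llamafactory \
    llamafactory:latest
```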

## Troubleshooting

### GPU Not Detected

If your GPU is not detected inside the container:

1. Ensure `nvidia-container-toolkit` is installed
2. Check that the Docker daemon has been restarted after installation
3. Verify your NVIDIA drivers are properly installed: `nvidia-smi`
4. Check Docker GPU support: `docker run --rm --gpus all ubuntu nvidia-smi` (if this still fails, see the runtime re-registration sketch below)
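
If the toolkit is installed but the GPU still does not appear, the NVIDIA runtime may not be registered with Docker. The registration commands below come from the NVIDIA Container Toolkit documentation:

```bash
# Register the NVIDIA runtime with Docker and restart the daemon
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker

# Re-test GPU visibility
docker run --rm --gpus all ubuntu nvidia-smi
```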

### Permission Denied

If you get permission errors, ensure your user is in the docker group:

```bash
sudo usermod -aG docker $USER
# Log out and back in for changes to take effect
```

## Additional Notes

- The default image is built on Ubuntu 22.04 (x86_64), CUDA 12.4, Python 3.11, PyTorch 2.6.0, and Flash-attn 2.7.4
- For different CUDA versions, you may need to adjust the base image in the Dockerfile
- Make sure your NVIDIA driver version is compatible with the CUDA version used in the Docker image
@@ -1,11 +1,12 @@
# https://hub.docker.com/r/ascendai/cann/tags
ARG BASE_IMAGE=ascendai/cann:8.0.0-910b-ubuntu22.04-py3.11
ARG BASE_IMAGE=ascendai/cann:8.1.rc1-910b-ubuntu22.04-py3.11
FROM ${BASE_IMAGE}

# Installation arguments
ARG PIP_INDEX=https://pypi.org/simple
ARG EXTRAS=torch-npu,metrics
ARG HTTP_PROXY=""
ARG PYTORCH_INDEX=https://download.pytorch.org/whl/cpu

# Define environments
ENV MAX_JOBS=16
@@ -28,6 +29,10 @@ RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools

# Install torch-npu
RUN pip uninstall -y torch torchvision torchaudio && \
    pip install --no-cache-dir "torch-npu==2.5.1" "torchvision==0.20.1" --index-url "${PYTORCH_INDEX}"

# Install the requirements
COPY requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt

@@ -66,6 +66,12 @@ EXPOSE 8000
ENV http_proxy=
ENV https_proxy=

# Set no_proxy environment variable
ENV no_proxy="localhost, 127.0.0.1, ::1"

# fix pydantic version
RUN pip install pydantic==2.10.6

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url
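
A quick sanity check after building the NPU image (our suggestion, not part of this diff; it assumes the Ascend driver and device files are mounted into the running container):

```bash
# Inside the container: confirm the torch-npu stack can see a device
python -c "import torch, torch_npu; print(torch.npu.is_available())"
```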
@@ -290,3 +290,15 @@ llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
```bash
bash examples/extras/fsdp_qlora/train.sh
```

#### OFT Fine-Tuning

```bash
llamafactory-cli train examples/extras/oft/llama3_oft_sft.yaml
```

#### QOFT Fine-Tuning

```bash
llamafactory-cli train examples/extras/qoft/llama3_oft_sft_bnb_npu.yaml
```

@@ -290,3 +290,15 @@ llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
```bash
bash examples/extras/fsdp_qlora/train.sh
```

#### OFT 微调

```bash
llamafactory-cli train examples/extras/oft/llama3_oft_sft.yaml
```

#### QOFT 微调

```bash
llamafactory-cli train examples/extras/qoft/llama3_oft_sft_bnb_npu.yaml
```
43
examples/extras/dft/qwen2_full_sft.yaml
Normal file
@@ -0,0 +1,43 @@
### model
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
use_dft_loss: true

### dataset
dataset: identity,alpaca_en_demo
template: qwen
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/qwen2-1_5b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
48
examples/extras/fp8/llama3_fp8_deepspeed_sft.yaml
Normal file
@@ -0,0 +1,48 @@
# FP8 training example with DeepSpeed ZeRO-3
# This config demonstrates FP8 mixed precision training using HuggingFace Accelerate
# with DeepSpeed providing memory optimization (not FP8 handling)

### Model configuration
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### Method configuration
stage: sft
do_train: true
finetuning_type: full

### Dataset configuration
dataset: identity
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### Output configuration
output_dir: saves/llama3-8b/fp8-deepspeed/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### Training configuration
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true

### FP8 configuration
fp8: true
fp8_backend: torchao # Use TorchAO backend for FP8
fp8_enable_fsdp_float8_all_gather: false # Not used with DeepSpeed

### DeepSpeed configuration
deepspeed: examples/deepspeed/ds_z3_fp8_config.json

### Logging configuration
report_to: wandb
run_name: llama3_fp8_deepspeed_sft
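
To launch this recipe, the repository's usual entry point applies (a sketch following the `llamafactory-cli train <yaml>` pattern used elsewhere in `examples/`; `FORCE_TORCHRUN=1` forces the torchrun launcher for multi-GPU runs):

```bash
FORCE_TORCHRUN=1 llamafactory-cli train examples/extras/fp8/llama3_fp8_deepspeed_sft.yaml
```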
51
examples/extras/fp8/llama3_fp8_fsdp_sft.yaml
Normal file
@@ -0,0 +1,51 @@
# FP8 training example with FSDP
# This config demonstrates FP8 mixed precision training using HuggingFace Accelerate
# with FSDP for distributed training and float8 all-gather optimization

### Model configuration
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### Method configuration
stage: sft
do_train: true
finetuning_type: full

### Dataset configuration
dataset: identity
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### Output configuration
output_dir: saves/llama3-8b/fp8-fsdp/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### Training configuration
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true

### FP8 configuration
fp8: true
fp8_backend: torchao # Use TorchAO backend for FP8
fp8_enable_fsdp_float8_all_gather: true # Enable FSDP2 float8 all-gather optimization

### FSDP configuration (using training arguments - no separate FSDP config file)
fsdp:
  - full_shard
  - auto_wrap
fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer

### Logging configuration
report_to: wandb
run_name: llama3_fp8_fsdp_sft
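
The FSDP variant is launched the same way; only the config path changes:

```bash
FORCE_TORCHRUN=1 llamafactory-cli train examples/extras/fp8/llama3_fp8_fsdp_sft.yaml
```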
46
examples/extras/oft/llama3_oft_sft.yaml
Normal file
@@ -0,0 +1,46 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: oft
oft_block_size: 32
oft_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama3-8b/oft/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null

### eval
# eval_dataset: alpaca_en_demo
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
47
examples/extras/oft/qwen2_5vl_oft_sft.yaml
Normal file
@@ -0,0 +1,47 @@
### model
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
image_max_pixels: 262144
video_max_pixels: 16384
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: oft
oft_block_size: 32
oft_target: all

### dataset
dataset: mllm_demo,identity,alpaca_en_demo # video: mllm_video_demo
template: qwen2_vl
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/qwen2_5vl-7b/oft/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
44
examples/extras/qoft/llama3_oft_sft_awq.yaml
Normal file
@@ -0,0 +1,44 @@
### model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: oft
oft_block_size: 32
oft_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama3-8b/oft/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
47
examples/extras/qoft/llama3_oft_sft_bnb_npu.yaml
Normal file
@@ -0,0 +1,47 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4
quantization_method: bnb
double_quantization: false
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: oft
oft_block_size: 32
oft_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama3-8b/oft/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
44
examples/extras/qoft/llama3_oft_sft_gptq.yaml
Normal file
@@ -0,0 +1,44 @@
### model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: oft
oft_block_size: 32
oft_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama3-8b/oft/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
46
examples/train_lora/gpt_lora_sft.yaml
Normal file
@@ -0,0 +1,46 @@
### model
model_name_or_path: openai/gpt-oss-20b
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: lora
lora_rank: 8
lora_target: all

### dataset
dataset: identity,alpaca_en_demo
template: gpt
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/gpt-20b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null

### eval
# eval_dataset: alpaca_en_demo
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
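
This recipe uses the same single-command workflow as the other LoRA examples (the command mirrors the `llamafactory-cli train <yaml>` pattern shown earlier in this diff):

```bash
llamafactory-cli train examples/train_lora/gpt_lora_sft.yaml
```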
@@ -4,11 +4,11 @@ build-backend = "setuptools.build_meta"

[project]
name = "llamafactory"
requires-python = ">=3.9.0"
dynamic = [
    "version",
    "dependencies",
    "optional-dependencies",
    "requires-python",
    "scripts",
    "authors",
    "description",
@@ -1,27 +1,35 @@
transformers>=4.45.0,<=4.52.4,!=4.46.*,!=4.47.*,!=4.48.0,!=4.52.0; sys_platform != 'darwin'
transformers>=4.45.0,<=4.51.3,!=4.46.*,!=4.47.*,!=4.48.0,!=4.52.0; sys_platform == 'darwin'
datasets>=2.16.0,<=3.6.0
accelerate>=0.34.0,<=1.7.0
peft>=0.14.0,<=0.15.2
# core deps
transformers>=4.49.0,<=4.56.2,!=4.52.0
datasets>=2.16.0,<=4.0.0
accelerate>=1.3.0,<=1.10.1
peft>=0.14.0,<=0.17.1
trl>=0.8.6,<=0.9.6
tokenizers>=0.19.0,<=0.21.1
gradio>=4.38.0,<=5.31.0
scipy
# gui
gradio>=4.38.0,<=5.45.0
matplotlib>=3.7.0
tyro<0.9.0
# ops
einops
numpy<2.0.0
pandas>=2.0.0
scipy
# model and tokenizer
sentencepiece
tiktoken
protobuf
uvicorn
fastapi
sse-starlette
matplotlib>=3.7.0
modelscope>=1.14.0
hf-transfer
safetensors<=0.5.3
# python
fire
omegaconf
packaging
protobuf
pyyaml
numpy<2.0.0
pydantic<=2.10.6
pandas>=2.0.0
# api
uvicorn
fastapi
sse-starlette
# media
av
librosa
tyro<0.9.0
@@ -29,33 +29,30 @@ import shutil

import fire
from peft import PeftModel
from transformers import (
    AutoProcessor,
    Qwen2_5OmniForConditionalGeneration,  # type: ignore
    Qwen2_5OmniThinkerForConditionalGeneration,
)
from transformers import AutoConfig, AutoModelForTextToWaveform, AutoProcessor
from transformers.utils import cached_file


def merge_lora(
    base_model_path: str,
    lora_checkpoint_path: str,
    model_path: str,
    lora_path: str,
    save_path: str = "./merged_model_checkpoint",
    extra_file: str = "spk_dict.pt",
    submodule_name: str = "thinker",
    save_path: str = "./merged_model_checkpoint",
):
    """Load the original model, merge the LoRA weights.

    For a specified submodule, and save the final merged model along with its configurations.

    Args:
        base_model_path (str): Path to the original model directory.
        lora_checkpoint_path (str): Path to the directory containing LoRA weights.
        model_path (str): Path to the original model directory.
        lora_path (str): Path to the directory containing LoRA weights.
        save_path (str): Directory where the merged model and configurations will be saved.
        extra_file (str): Name of the extra file to be copied (default: "spk_dict.pt").
        submodule_name (str): Name of the submodule to merge (default: "thinker").
        save_path (str): Directory where the merged model and configurations will be saved.
    """
    # 1. Load the original model
    model = Qwen2_5OmniForConditionalGeneration.from_pretrained(base_model_path, torch_dtype="auto", device_map="cpu")
    model = AutoModelForTextToWaveform.from_pretrained(model_path, torch_dtype="auto", device_map="cpu")
    print("Successfully loaded the original model.")

    # 2. Extract the submodule to be merged (e.g., model.thinker)
@@ -66,13 +63,13 @@ def merge_lora(
    print(f"Successfully extracted submodule: {submodule_name}.")

    # 3. Load the LoRA weights onto the extracted submodule
    lora_model = PeftModel.from_pretrained(base_submodule, lora_checkpoint_path)
    processor = AutoProcessor.from_pretrained(lora_checkpoint_path)
    print("LoRA weights and processor loaded successfully.")
    lora_model = PeftModel.from_pretrained(base_submodule, lora_path)
    processor = AutoProcessor.from_pretrained(lora_path)
    print("Successfully loaded LoRA weights and processor.")

    # 4. Merge the LoRA weights into the submodule and unload the LoRA modules
    merged_submodule = lora_model.merge_and_unload()
    print("LoRA weights merged successfully.")
    print("Successfully merged LoRA weights.")

    # 5. Replace the original submodule with the merged submodule in the model
    setattr(model, submodule_name, merged_submodule)
@@ -80,20 +77,19 @@ def merge_lora(
    # 6. Save the final merged model along with the tokenizer and processor configuration
    model.save_pretrained(save_path)
    processor.save_pretrained(save_path)
    print(f"Merged model and tokenizer saved to {save_path}.")
    print(f"Merged model and processor saved to {save_path}.")

    source_file = os.path.join(base_model_path, extra_file)
    target_file = os.path.join(save_path, extra_file)
    if os.path.exists(source_file):
        shutil.copy(source_file, target_file)
        print(f"File '{extra_file}' copied from {base_model_path} to {save_path}.")
    else:
        print(f"File '{extra_file}' not found in {base_model_path}, skipping copy.")
    try:
        source_file = cached_file(path_or_repo_id=model_path, filename=extra_file)
        shutil.copy(source_file, os.path.join(save_path, extra_file))
        print(f"File '{extra_file}' copied from {model_path} to {save_path}.")
    except Exception:
        print(f"File '{extra_file}' not found in {model_path}, skipping copy.")


def save_full_model(
    saved_thinker_path: str,
    base_model_path: str,
    model_path: str,
    thinker_path: str,
    save_path: str = "./merged_model_checkpoint",
    extra_file: str = "spk_dict.pt",
):
@@ -102,34 +98,42 @@ def save_full_model(
    Then save the complete model along with its tokenizer and processor configuration.

    Args:
        saved_thinker_path (str): Path to the saved thinker weights.
        base_model_path (str): Directory path of the original model.
        model_path (str): Directory path of the original model.
        thinker_path (str): Path to the saved thinker weights.
        save_path (str): Directory where the merged model and configurations will be saved.
        extra_file (str): Name of the extra file to be copied (default: "spk_dict.pt").
    """
    # 1. Load the saved thinker module and the original model
    thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained(
        saved_thinker_path, torch_dtype="auto", device_map="cpu"
    )
    base_model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
        base_model_path, torch_dtype="auto", device_map="cpu"
    )
    config = AutoConfig.from_pretrained(model_path)
    if getattr(config, "model_type") == "qwen2_5_omni":
        from transformers.models.qwen2_5_omni import Qwen2_5OmniThinkerForConditionalGeneration  # type: ignore

        ThinkerClass = Qwen2_5OmniThinkerForConditionalGeneration
    elif getattr(config, "model_type") == "qwen3_omni_moe":
        from transformers.models.qwen3_omni_moe import Qwen3OmniMoeThinkerForConditionalGeneration  # type: ignore

        ThinkerClass = Qwen3OmniMoeThinkerForConditionalGeneration
    else:
        raise ValueError(f"Unsupported model type: {getattr(config, 'model_type')}.")

    thinker = ThinkerClass.from_pretrained(thinker_path, torch_dtype="auto", device_map="cpu")
    base_model = AutoModelForTextToWaveform.from_pretrained(model_path, torch_dtype="auto", device_map="cpu")
    base_model.thinker = thinker
    processor = AutoProcessor.from_pretrained(thinker_path)
    print("Successfully loaded model weights and processor.")

    # 2. Save the complete model along with its tokenizer and processor configuration
    processor = AutoProcessor.from_pretrained(saved_thinker_path)
    base_model.save_pretrained(save_path)
    processor.save_pretrained(save_path)
    print(f"Merged model and processor saved to {save_path}.")

    # 3. Copy the extra file from the base model directory to the save_path
    source_file = os.path.join(base_model_path, extra_file)
    target_file = os.path.join(save_path, extra_file)
    if os.path.exists(source_file):
        shutil.copy(source_file, target_file)
        print(f"File '{extra_file}' copied from {base_model_path} to {save_path}.")
    else:
        print(f"File '{extra_file}' not found in {base_model_path}, skipping copy.")
    try:
        source_file = cached_file(path_or_repo_id=model_path, filename=extra_file)
        shutil.copy(source_file, os.path.join(save_path, extra_file))
        print(f"File '{extra_file}' copied from {model_path} to {save_path}.")
    except Exception:
        print(f"File '{extra_file}' not found in {model_path}, skipping copy.")


if __name__ == "__main__":
8
setup.py
@@ -43,7 +43,7 @@ def get_console_scripts() -> list[str]:

extra_require = {
    "torch": ["torch>=2.0.0", "torchvision>=0.15.0"],
    "torch-npu": ["torch==2.4.0", "torch-npu==2.4.0.post2", "decorator"],
    "torch-npu": ["torch-npu==2.5.1", "torchvision==0.20.1", "decorator"],
    "metrics": ["nltk", "jieba", "rouge-chinese"],
    "deepspeed": ["deepspeed>=0.10.0,<=0.16.9"],
    "liger-kernel": ["liger-kernel>=0.5.5"],
@@ -52,7 +52,7 @@ extra_require = {
    "eetq": ["eetq"],
    "gptq": ["optimum>=1.24.0", "gptqmodel>=2.0.0"],
    "aqlm": ["aqlm[gpu]>=1.1.0"],
    "vllm": ["vllm>=0.4.3,<=0.8.6"],
    "vllm": ["vllm>=0.4.3,<=0.10.2"],
    "sglang": ["sglang[srt]>=0.4.5", "transformers==4.51.1"],
    "galore": ["galore-torch"],
    "apollo": ["apollo-torch"],
@@ -68,9 +68,11 @@ extra_require = {
        "referencing",
        "jsonschema_specifications",
    ],
    "modelscope": ["modelscope"],
    "openmind": ["openmind"],
    "swanlab": ["swanlab"],
    "fp8": ["torchao>=0.8.0", "accelerate>=1.10.0"],
    "fp8-te": ["transformer_engine[pytorch]>=2.0.0", "accelerate>=1.10.0"],
    "fp8-all": ["torchao>=0.8.0", "transformer_engine[pytorch]>=2.0.0", "accelerate>=1.10.0"],
    "dev": ["pre-commit", "ruff", "pytest", "build"],
}
@@ -132,7 +132,7 @@ def _process_request(
        if re.match(r"^data:video\/(mp4|mkv|avi|mov);base64,(.+)$", video_url):  # base64 video
            video_stream = io.BytesIO(base64.b64decode(video_url.split(",", maxsplit=1)[1]))
        elif os.path.isfile(video_url):  # local file
            video_stream = open(video_url, "rb")
            video_stream = video_url
        else:  # web uri
            video_stream = requests.get(video_url, stream=True).raw

@@ -143,7 +143,7 @@ def _process_request(
        if re.match(r"^data:audio\/(mpeg|mp3|wav|ogg);base64,(.+)$", audio_url):  # base64 audio
            audio_stream = io.BytesIO(base64.b64decode(audio_url.split(",", maxsplit=1)[1]))
        elif os.path.isfile(audio_url):  # local file
            audio_stream = open(audio_url, "rb")
            audio_stream = audio_url
        else:  # web uri
            audio_stream = requests.get(audio_url, stream=True).raw
@@ -24,9 +24,6 @@ from typing import TYPE_CHECKING, Any, Optional
from ..extras.constants import EngineName
from ..extras.misc import torch_gc
from ..hparams import get_infer_args
from .hf_engine import HuggingfaceEngine
from .sglang_engine import SGLangEngine
from .vllm_engine import VllmEngine


if TYPE_CHECKING:
@@ -49,12 +46,31 @@ class ChatModel:

    def __init__(self, args: Optional[dict[str, Any]] = None) -> None:
        model_args, data_args, finetuning_args, generating_args = get_infer_args(args)

        if model_args.infer_backend == EngineName.HF:
            from .hf_engine import HuggingfaceEngine

            self.engine: BaseEngine = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
        elif model_args.infer_backend == EngineName.VLLM:
            self.engine: BaseEngine = VllmEngine(model_args, data_args, finetuning_args, generating_args)
            try:
                from .vllm_engine import VllmEngine

                self.engine: BaseEngine = VllmEngine(model_args, data_args, finetuning_args, generating_args)
            except ImportError as e:
                raise ImportError(
                    "vLLM is not installed; you may need to run `pip install vllm`\n"
                    "or try the HuggingFace backend instead: --infer_backend huggingface"
                ) from e
        elif model_args.infer_backend == EngineName.SGLANG:
            self.engine: BaseEngine = SGLangEngine(model_args, data_args, finetuning_args, generating_args)
            try:
                from .sglang_engine import SGLangEngine

                self.engine: BaseEngine = SGLangEngine(model_args, data_args, finetuning_args, generating_args)
            except ImportError as e:
                raise ImportError(
                    "SGLang is not installed; you may need to run `pip install sglang[all]`\n"
                    "or try the HuggingFace backend instead: --infer_backend huggingface"
                ) from e
        else:
            raise NotImplementedError(f"Unknown backend: {model_args.infer_backend}")
@@ -35,16 +35,53 @@ USAGE = (
)


def _run_api():
    from .api.app import run_api

    return run_api()


def _run_chat():
    from .chat.chat_model import run_chat

    return run_chat()


def _run_eval():
    from .eval.evaluator import run_eval

    return run_eval()


def _export_model():
    from .train.tuner import export_model

    return export_model()


def _run_exp():
    from .train.tuner import run_exp

    return run_exp()


def _run_web_demo():
    from .webui.interface import run_web_demo

    return run_web_demo()


def _run_web_ui():
    from .webui.interface import run_web_ui

    return run_web_ui()


def main():
    from . import launcher
    from .api.app import run_api
    from .chat.chat_model import run_chat
    from .eval.evaluator import run_eval
    from .extras import logging
    from .extras.env import VERSION, print_env
    from .extras.misc import find_available_port, get_device_count, is_env_enabled, use_ray
    from .train.tuner import export_model, run_exp
    from .webui.interface import run_web_demo, run_web_ui

    logger = logging.get_logger(__name__)

@@ -61,14 +98,14 @@ def main():
    )

    COMMAND_MAP = {
        "api": run_api,
        "chat": run_chat,
        "api": _run_api,
        "chat": _run_chat,
        "env": print_env,
        "eval": run_eval,
        "export": export_model,
        "train": run_exp,
        "webchat": run_web_demo,
        "webui": run_web_ui,
        "eval": _run_eval,
        "export": _export_model,
        "train": _run_exp,
        "webchat": _run_web_demo,
        "webui": _run_web_ui,
        "version": partial(print, WELCOME),
        "help": partial(print, USAGE),
    }
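With the wrappers in place, `COMMAND_MAP` holds cheap function references, and the heavy subpackage behind each command is imported only once the command is dispatched. A rough sketch of such a dispatch, under the assumption (consistent with the hunk above) that unknown commands fall back to printing the usage text:

```python
import sys
from functools import partial


def dispatch(command_map: dict, argv: list[str]) -> None:
    # Pop the subcommand and call its lazy wrapper; nothing heavy is imported
    # until the selected wrapper actually runs its deferred imports.
    command = argv[1] if len(argv) > 1 else "help"
    command_map.get(command, partial(print, "usage: llamafactory-cli <command>"))()
```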
@@ -194,7 +194,7 @@ class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
        elif "video_second_per_grid" in mm_inputs:  # for qwen2.5 omni
            rope_index_kwargs["second_per_grids"] = mm_inputs.get("video_second_per_grid")

        if getattr(self.model.config, "model_type", None) == "qwen2_5_omni_thinker":  # for qwen2.5 omni
        if getattr(self.model.config, "model_type", None) in ["qwen2_5_omni_thinker", "qwen3_omni_moe_thinker"]:
            rope_index_kwargs["use_audio_in_video"] = getattr(self.processor, "use_audio_in_video", False)
            feature_attention_mask = mm_inputs.get("feature_attention_mask", None)
            if feature_attention_mask is not None:  # FIXME: need to get video image lengths
@@ -205,15 +205,25 @@ class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
                features["rope_deltas"] = rope_deltas - (1 - rope_index_kwargs["attention_mask"]).sum(
                    dim=-1
                ).unsqueeze(-1)
        else:  # for qwen2vl
        else:  # for qwen vl
            features["position_ids"], features["rope_deltas"] = self.get_rope_func(**rope_index_kwargs)

        if (
            self.model is not None
            and getattr(self.model.config, "model_type", None) in ["qwen2_vl", "qwen2_5_vl", "qwen2_5_omni_thinker"]
            and getattr(self.model.config, "model_type", None)
            in [
                "glm4v",
                "Keye",
                "qwen2_vl",
                "qwen2_5_vl",
                "qwen2_5_omni_thinker",
                "qwen3_omni_moe_thinker",
                "qwen3_vl",
                "qwen3_vl_moe",
            ]
            and ("position_ids" not in features or features["position_ids"].dim() != 3)
        ):
            raise ValueError("Qwen2-VL/Qwen2.5-Omni model requires 3D position ids for mrope.")
            raise ValueError(f"{self.model.config.model_type} requires 3D position ids for mrope.")

        if "cross_attention_mask" in mm_inputs:  # for mllama inputs when pad_to_multiple_of is enabled
            cross_attention_mask = mm_inputs.pop("cross_attention_mask")

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
from abc import abstractmethod
from dataclasses import dataclass
@@ -227,9 +227,150 @@ class SharegptDatasetConverter(DatasetConverter):
        return output


@dataclass
class OpenAIDatasetConverter(DatasetConverter):
    def __call__(self, example: dict[str, Any]) -> dict[str, Any]:
        tag_mapping = {
            self.dataset_attr.user_tag: Role.USER.value,
            self.dataset_attr.assistant_tag: Role.ASSISTANT.value,
            self.dataset_attr.observation_tag: Role.OBSERVATION.value,
            self.dataset_attr.function_tag: Role.FUNCTION.value,
            self.dataset_attr.system_tag: Role.SYSTEM.value,
        }

        messages = example[self.dataset_attr.messages]
        if (
            self.dataset_attr.system_tag
            and len(messages) != 0
            and messages[0][self.dataset_attr.role_tag] == self.dataset_attr.system_tag
        ):
            system = messages[0][self.dataset_attr.content_tag]
            messages = messages[1:]
        else:
            system = example.get(self.dataset_attr.system, "") if self.dataset_attr.system else ""

        aligned_messages = []
        tool_responses = []
        broken_data = False
        for turn_idx, message in enumerate(messages):
            role = message[self.dataset_attr.role_tag]
            content = message[self.dataset_attr.content_tag]

            if role in [self.dataset_attr.assistant_tag, self.dataset_attr.function_tag]:
                if "tool_calls" in message and len(message["tool_calls"]) > 0:
                    tool_calls_list = [tool["function"] for tool in message["tool_calls"]]
                    content = json.dumps(tool_calls_list, ensure_ascii=False)
                    role = self.dataset_attr.function_tag

            if role == self.dataset_attr.observation_tag:
                tool_responses.append(content)
                continue
            elif len(tool_responses) > 0:
                _content = "\n</tool_response>\n<tool_response>\n".join(tool_responses)
                aligned_messages.append(
                    {
                        "role": Role.OBSERVATION.value,
                        "content": _content,
                    }
                )
                tool_responses = []

            aligned_messages.append(
                {
                    "role": tag_mapping[role],
                    "content": content,
                }
            )

        odd_tags = (Role.USER.value, Role.OBSERVATION.value)
        even_tags = (Role.ASSISTANT.value, Role.FUNCTION.value)
        accept_tags = (odd_tags, even_tags)
        for turn_idx, message in enumerate(aligned_messages):
            if message["role"] not in accept_tags[turn_idx % 2]:
                logger.warning_rank0(f"Invalid role tag in {messages}.")
                broken_data = True
                break

        if (not self.dataset_attr.ranking and len(aligned_messages) % 2 != 0) or (
            self.dataset_attr.ranking and len(aligned_messages) % 2 == 0
        ):
            logger.warning_rank0(f"Invalid message count in {messages}.")
            broken_data = True

        if broken_data:
            logger.warning_rank0("Skipping this abnormal example.")
            prompt, response = [], []
        elif self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool):  # kto example
            prompt = aligned_messages[:-1]
            response = aligned_messages[-1:]
            if example[self.dataset_attr.kto_tag]:
                response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
            else:
                response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
        elif (
            self.dataset_attr.ranking
            and isinstance(example[self.dataset_attr.chosen], dict)
            and isinstance(example[self.dataset_attr.rejected], dict)
        ):  # pairwise example
            chosen = example[self.dataset_attr.chosen]
            rejected = example[self.dataset_attr.rejected]
            if (
                chosen[self.dataset_attr.role_tag] not in accept_tags[-1]
                or rejected[self.dataset_attr.role_tag] not in accept_tags[-1]
            ):
                logger.warning_rank0(f"Invalid role tag in {[chosen, rejected]}.")
                broken_data = True

            prompt = aligned_messages
            response = [
                {
                    "role": tag_mapping[chosen[self.dataset_attr.role_tag]],
                    "content": chosen[self.dataset_attr.content_tag],
                },
                {
                    "role": tag_mapping[rejected[self.dataset_attr.role_tag]],
                    "content": rejected[self.dataset_attr.content_tag],
                },
            ]
        else:  # normal example
            prompt = aligned_messages[:-1]
            response = aligned_messages[-1:]

        tools = example.get(self.dataset_attr.tools, "") if self.dataset_attr.tools else ""
        if isinstance(tools, dict) or isinstance(tools, list):
            tools = json.dumps(tools, ensure_ascii=False)

        short_system_prompt = "detailed thinking off"
        if not system:
            if not tools:
                system = short_system_prompt
            else:
                pass
        else:
            if not tools:
                if "detailed thinking on" in system or "detailed thinking off" in system:
                    pass
                else:
                    system += "\n" + short_system_prompt
            else:
                system += "\n"

        output = {
            "_prompt": prompt,
            "_response": response,
            "_system": system,
            "_tools": tools,
            "_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None,
            "_videos": self._find_medias(example[self.dataset_attr.videos]) if self.dataset_attr.videos else None,
            "_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None,
        }
        return output


DATASET_CONVERTERS = {
    "alpaca": AlpacaDatasetConverter,
    "sharegpt": SharegptDatasetConverter,
    "openai": OpenAIDatasetConverter,
}
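The new `OpenAIDatasetConverter` accepts records in the OpenAI messages layout: it folds a leading system message into `_system`, serializes `tool_calls` into the function role, and merges consecutive tool responses into a single observation turn. A small illustrative record of the kind it aligns (field names assume the default `messages`/`role`/`content` tags; the values are invented for illustration):

```python
example = {
    "messages": [
        {"role": "system", "content": "You are a weather bot."},
        {"role": "user", "content": "Weather in Paris?"},
        {
            "role": "assistant",
            "content": "",
            "tool_calls": [{"function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}],
        },
        {"role": "tool", "content": '{"temp_c": 21}'},
        {"role": "assistant", "content": "It is 21 °C in Paris."},
    ]
}
```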
@@ -97,8 +97,11 @@ class FunctionFormatter(StringFormatter):
    @override
    def apply(self, **kwargs) -> SLOTS:
        content: str = kwargs.pop("content")
        regex = re.compile(r"<think>(.*)</think>", re.DOTALL)
        thought = re.search(regex, content)
        thought_words, thought = kwargs.pop("thought_words", None), None
        if thought_words and len(thought_words) == 2:
            regex = re.compile(rf"{re.escape(thought_words[0])}(.*?){re.escape(thought_words[1])}", re.DOTALL)
            thought = re.search(regex, content)

        if thought:
            content = content.replace(thought.group(0), "")
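`FunctionFormatter.apply` now strips the reasoning span using the template's own `thought_words` pair instead of a hard-coded `<think>...</think>`; `re.escape` keeps marker characters such as `|` or newlines from being interpreted as regex syntax. A self-contained sketch of the same matching logic:

```python
import re


def strip_thought(content: str, thought_words: tuple[str, str]) -> str:
    """Remove the first reasoning span delimited by the template's thought markers."""
    regex = re.compile(rf"{re.escape(thought_words[0])}(.*?){re.escape(thought_words[1])}", re.DOTALL)
    thought = regex.search(content)
    return content.replace(thought.group(0), "") if thought else content


# e.g. strip_thought("<think>\nplan\n</think>\n\nanswer", ("<think>\n", "\n</think>\n\n")) == "answer"
```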
@@ -91,7 +91,7 @@ def _load_single_dataset(
        raise NotImplementedError(f"Unknown load type: {dataset_attr.load_from}.")

    if dataset_attr.load_from == "ms_hub":
        check_version("modelscope>=1.11.0", mandatory=True)
        check_version("modelscope>=1.14.0", mandatory=True)
        from modelscope import MsDataset  # type: ignore
        from modelscope.utils.config_ds import MS_DATASETS_CACHE  # type: ignore

@@ -27,6 +27,10 @@ from typing import TYPE_CHECKING, BinaryIO, Literal, Optional, TypedDict, Union
import numpy as np
import torch
from transformers.image_utils import get_image_size, is_valid_image, to_numpy_array
from transformers.models.mllama.processing_mllama import (
    convert_sparse_cross_attention_mask_to_dense,
    get_cross_attention_token_mask,
)
from typing_extensions import override

from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
@@ -51,17 +55,10 @@ if is_pyav_available():
    import av


if is_transformers_version_greater_than("4.45.0"):
    from transformers.models.mllama.processing_mllama import (
        convert_sparse_cross_attention_mask_to_dense,
        get_cross_attention_token_mask,
    )


if is_transformers_version_greater_than("4.52.0"):
    from transformers.image_utils import make_flat_list_of_images
    from transformers.video_utils import make_batched_videos
elif is_transformers_version_greater_than("4.49.0"):
else:
    from transformers.image_utils import make_batched_videos, make_flat_list_of_images

@@ -137,7 +134,7 @@ def _make_batched_images(images: list["ImageObject"], imglens: list[int]) -> lis

def _check_video_is_nested_images(video: "VideoInput") -> bool:
    r"""Check if the video is nested images."""
    return isinstance(video, list) and all(isinstance(frame, (str, BinaryIO, dict)) for frame in video)
    return isinstance(video, list) and all(isinstance(frame, (str, BinaryIO, dict, ImageObject)) for frame in video)


@dataclass
@@ -298,11 +295,8 @@ class MMPluginMixin:
        r"""Regularizes audios to avoid error. Including reading and resampling."""
        results, sampling_rates = [], []
        for audio in audios:
            if isinstance(audio, (str, BinaryIO)):
                audio, sampling_rate = librosa.load(audio, sr=sampling_rate)

            if not isinstance(audio, np.ndarray):
                raise ValueError(f"Expect input is a list of audios, but got {type(audio)}.")
                audio, sampling_rate = librosa.load(audio, sr=sampling_rate)

            results.append(audio)
            sampling_rates.append(sampling_rate)
@@ -391,7 +385,7 @@ class MMPluginMixin:
                return_tensors="pt",
            )
        )
        mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask")  # prevent conflicts
        mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask", None)  # prevent conflicts

        return mm_inputs

@@ -512,6 +506,39 @@ class Gemma3Plugin(BasePlugin):
        return mm_inputs


class Gemma3nPlugin(Gemma3Plugin):
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        boi_token: str = getattr(processor, "boi_token")
        boa_token: str = getattr(processor, "boa_token")
        full_image_sequence: str = getattr(processor, "full_image_sequence")
        full_audio_sequence: str = getattr(processor, "full_audio_sequence")
        image_str = full_image_sequence if self.expand_mm_tokens else boi_token
        audio_str = full_audio_sequence if self.expand_mm_tokens else boa_token

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, image_str, 1)

            while AUDIO_PLACEHOLDER in content:
                content = content.replace(AUDIO_PLACEHOLDER, audio_str, 1)

            message["content"] = content

        return messages


@dataclass
class InternVLPlugin(BasePlugin):
    @override
@@ -1370,6 +1397,9 @@ class Qwen2AudioPlugin(BasePlugin):

@dataclass
class Qwen2VLPlugin(BasePlugin):
    vision_bos_token: str = "<|vision_start|>"
    vision_eos_token: str = "<|vision_end|>"

    @override
    def _preprocess_image(self, image: "ImageObject", **kwargs) -> "ImageObject":
        image = super()._preprocess_image(image, **kwargs)
@@ -1485,14 +1515,18 @@ class Qwen2VLPlugin(BasePlugin):
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER, f"<|vision_start|>{self.image_token * image_seqlen}<|vision_end|>", 1
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                video_seqlen = video_grid_thw[num_video_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    VIDEO_PLACEHOLDER, f"<|vision_start|>{self.video_token * video_seqlen}<|vision_end|>", 1
                    VIDEO_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_video_tokens += 1

@@ -1501,7 +1535,259 @@ class Qwen2VLPlugin(BasePlugin):
        return messages


@dataclass
class Qwen3VLPlugin(Qwen2VLPlugin):
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))

        if len(videos) != 0:
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            video_metadata = [
                {"fps": getattr(processor, "video_fps", 24.0), "duration": len(video), "total_num_frames": len(video)}
                for video in videos["videos"]
            ]
            mm_inputs.update(
                video_processor(videos=videos["videos"], video_metadata=video_metadata, return_metadata=True)
            )
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            if "second_per_grid_ts" in processor.model_input_names:
                mm_inputs["second_per_grid_ts"] = [temporal_patch_size / fps for fps in videos["fps_per_video"]]

        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        video_processor: BaseImageProcessor = getattr(processor, "video_processor")

        image_merge_length: int = getattr(image_processor, "merge_size") ** 2
        video_merge_length: int = getattr(video_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            num_frames = video_grid_thw[0][0] if len(video_grid_thw) > 0 else 0  # hard code for now
            video_metadata = mm_inputs.get("video_metadata", {})
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            num_frames = 0
            timestamps = [0]

        for idx, message in enumerate(messages):
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = (
                    image_grid_thw[num_image_tokens].prod() // image_merge_length if self.expand_mm_tokens else 1
                )
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                metadata = video_metadata[idx]
                timestamps = processor._calculate_timestamps(
                    metadata.frames_indices,
                    metadata.fps,
                    video_processor.merge_size,
                )
                video_structure = ""
                for frame_index in range(num_frames):
                    video_seqlen = (
                        video_grid_thw[num_video_tokens][1:].prod() // video_merge_length
                        if self.expand_mm_tokens
                        else 1
                    )
                    timestamp_sec = timestamps[frame_index]
                    frame_structure = (
                        f"<{timestamp_sec:.1f} seconds>"
                        f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}"
                    )
                    video_structure += frame_structure

                if not self.expand_mm_tokens:
                    video_structure = f"{self.vision_bos_token}{self.video_token}{self.vision_eos_token}"

                content = content.replace(VIDEO_PLACEHOLDER, video_structure, 1)
                num_video_tokens += 1

            message["content"] = content

        return messages


@dataclass
class GLM4VPlugin(Qwen2VLPlugin):
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))

        if len(videos) != 0:
            video_data = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            # prepare video metadata
            video_metadata = [
                {"fps": 2, "duration": len(video), "total_frames": len(video)} for video in video_data["videos"]
            ]
            mm_inputs.update(video_processor(images=None, videos=video_data["videos"], video_metadata=video_metadata))

        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")

        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            num_frames = video_grid_thw[0][0] if len(video_grid_thw) > 0 else 0  # hard code for now
            timestamps = mm_inputs.get("timestamps", [])

            if hasattr(timestamps, "tolist"):
                timestamps = timestamps.tolist()

            if not timestamps:
                timestamps_list = []
            elif isinstance(timestamps[0], list):
                timestamps_list = timestamps[0]
            else:
                timestamps_list = timestamps

            unique_timestamps = timestamps_list.copy()
            selected_timestamps = unique_timestamps[:num_frames]
            while len(selected_timestamps) < num_frames:
                selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            num_frames = 0
            selected_timestamps = [0]

        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER, f"<|begin_of_image|>{self.image_token * image_seqlen}<|end_of_image|>", 1
                )
                num_image_tokens += 1

            while VIDEO_PLACEHOLDER in content:
                video_structure = ""
                for frame_index in range(num_frames):
                    video_seqlen = (
                        video_grid_thw[num_video_tokens][1:].prod() // merge_length if self.expand_mm_tokens else 1
                    )
                    timestamp_sec = selected_timestamps[frame_index]
                    frame_structure = (
                        f"<|begin_of_image|>{self.image_token * video_seqlen}<|end_of_image|>{timestamp_sec}"
                    )
                    video_structure += frame_structure

                if not self.expand_mm_tokens:
                    video_structure = self.video_token

                content = content.replace(VIDEO_PLACEHOLDER, f"<|begin_of_video|>{video_structure}<|end_of_video|>", 1)
                num_video_tokens += 1

            message["content"] = content

        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["ProcessorMixin"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("timestamps", None)
        return mm_inputs


@dataclass
class Qwen2OmniPlugin(Qwen2VLPlugin):
    audio_bos_token: str = "<|audio_start|>"
    audio_eos_token: str = "<|audio_end|>"

    @override
    def _get_mm_inputs(
        self,
@@ -1588,7 +1874,9 @@ class Qwen2OmniPlugin(Qwen2VLPlugin):
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER, f"<|vision_bos|>{self.image_token * image_seqlen}<|vision_eos|>", 1
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1

@@ -1625,7 +1913,7 @@ class Qwen2OmniPlugin(Qwen2VLPlugin):
                video_chunk_indices = processor.get_chunked_index(video_t_index, t_ntoken_per_chunk)
                audio_chunk_indices = processor.get_chunked_index(audio_t_index, t_ntoken_per_chunk)
                placeholder_string = ""
                placeholder_string += "<|vision_bos|>" + "<|audio_bos|>"
                placeholder_string += self.vision_bos_token + self.audio_bos_token
                for j in range(max(len(video_chunk_indices), len(audio_chunk_indices))):
                    video_chunk_index = video_chunk_indices[j] if j < len(video_chunk_indices) else None
                    audio_chunk_index = audio_chunk_indices[j] if j < len(audio_chunk_indices) else None
@@ -1635,7 +1923,7 @@ class Qwen2OmniPlugin(Qwen2VLPlugin):
                    if audio_chunk_index is not None:
                        placeholder_string += self.audio_token * (audio_chunk_index[1] - audio_chunk_index[0])

                placeholder_string += "<|audio_eos|>" + "<|vision_eos|>"
                placeholder_string += self.audio_eos_token + self.vision_eos_token
                content = content.replace(VIDEO_PLACEHOLDER, placeholder_string, 1)
                content = content.replace(AUDIO_PLACEHOLDER, "", 1)
                num_audio_tokens += 1
@@ -1644,7 +1932,9 @@ class Qwen2OmniPlugin(Qwen2VLPlugin):
            while AUDIO_PLACEHOLDER in content:
                audio_seqlen = audio_lengths[num_audio_tokens] if self.expand_mm_tokens else 1
                content = content.replace(
                    AUDIO_PLACEHOLDER, f"<|audio_bos|>{self.audio_token * audio_seqlen}<|audio_eos|>", 1
                    AUDIO_PLACEHOLDER,
                    f"{self.audio_bos_token}{self.audio_token * audio_seqlen}{self.audio_eos_token}",
                    1,
                )
                num_audio_tokens += 1

@@ -1653,7 +1943,9 @@ class Qwen2OmniPlugin(Qwen2VLPlugin):
                    video_grid_thw[num_video_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                )
                content = content.replace(
                    VIDEO_PLACEHOLDER, f"<|vision_bos|>{self.video_token * video_seqlen}<|vision_eos|>", 1
                    VIDEO_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_video_tokens += 1

@@ -1718,6 +2010,8 @@ class VideoLlavaPlugin(BasePlugin):
PLUGINS = {
    "base": BasePlugin,
    "gemma3": Gemma3Plugin,
    "glm4v": GLM4VPlugin,
    "gemma3n": Gemma3nPlugin,
    "intern_vl": InternVLPlugin,
    "kimi_vl": KimiVLPlugin,
    "llama4": Llama4Plugin,
@@ -1731,6 +2025,7 @@ PLUGINS = {
    "qwen2_audio": Qwen2AudioPlugin,
    "qwen2_omni": Qwen2OmniPlugin,
    "qwen2_vl": Qwen2VLPlugin,
    "qwen3_vl": Qwen3VLPlugin,
    "video_llava": VideoLlavaPlugin,
}

@@ -1748,9 +2043,10 @@ def get_mm_plugin(
    image_token: Optional[str] = None,
    video_token: Optional[str] = None,
    audio_token: Optional[str] = None,
    **kwargs,
) -> "BasePlugin":
    r"""Get plugin for multimodal inputs."""
    if name not in PLUGINS:
        raise ValueError(f"Multimodal plugin `{name}` not found.")

    return PLUGINS[name](image_token, video_token, audio_token)
    return PLUGINS[name](image_token, video_token, audio_token, **kwargs)
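Forwarding `**kwargs` lets a template override a plugin's boundary tokens at registration time, which is exactly what the `dots_ocr` template later in this changeset does when reusing the `qwen2_vl` plugin:

```python
# Reuse the qwen2_vl plugin but swap the vision boundary tokens
# (token values taken from the dots_ocr template registered below).
plugin = get_mm_plugin(
    name="qwen2_vl",
    image_token="<|imgpad|>",
    video_token="<|vidpad|>",
    vision_bos_token="<|img|>",
    vision_eos_token="<|endofimg|>",
)
```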
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
import json
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Literal, Optional
|
||||
from typing import Any, Literal, Optional, Union
|
||||
|
||||
from huggingface_hub import hf_hub_download
|
||||
|
||||
@@ -90,12 +90,14 @@ class DatasetAttr:
|
||||
self.set_attr(tag, attr["tags"])
|
||||
|
||||
|
||||
def get_dataset_list(dataset_names: Optional[list[str]], dataset_dir: str) -> list["DatasetAttr"]:
|
||||
def get_dataset_list(dataset_names: Optional[list[str]], dataset_dir: Union[str, dict]) -> list["DatasetAttr"]:
|
||||
r"""Get the attributes of the datasets."""
|
||||
if dataset_names is None:
|
||||
dataset_names = []
|
||||
|
||||
if dataset_dir == "ONLINE":
|
||||
if isinstance(dataset_dir, dict):
|
||||
dataset_info = dataset_dir
|
||||
elif dataset_dir == "ONLINE":
|
||||
dataset_info = None
|
||||
else:
|
||||
if dataset_dir.startswith("REMOTE:"):
|
||||
|
||||
@@ -62,7 +62,7 @@ class SupervisedDatasetProcessor(DatasetProcessor):
|
||||
|
||||
if self.data_args.train_on_prompt:
|
||||
source_label = source_ids
|
||||
elif self.template.efficient_eos:
|
||||
elif self.template.efficient_eos and turn_idx != 0:
|
||||
source_label = [self.tokenizer.eos_token_id] + [IGNORE_INDEX] * (source_len - 1)
|
||||
else:
|
||||
source_label = [IGNORE_INDEX] * source_len
|
||||
|
||||
@@ -96,7 +96,7 @@ class Template:
|
||||
|
||||
def add_thought(self, content: str = "") -> str:
|
||||
r"""Add empty thought to assistant message."""
|
||||
return f"{self.thought_words[0]}\n\n{self.thought_words[1]}\n\n" + content
|
||||
return f"{self.thought_words[0]}{self.thought_words[1]}" + content
|
||||
|
||||
def remove_thought(self, content: str) -> str:
|
||||
r"""Remove thought from assistant message."""
|
||||
@@ -156,7 +156,7 @@ class Template:
|
||||
elif message["role"] == Role.OBSERVATION:
|
||||
elements += self.format_observation.apply(content=message["content"])
|
||||
elif message["role"] == Role.FUNCTION:
|
||||
elements += self.format_function.apply(content=message["content"])
|
||||
elements += self.format_function.apply(content=message["content"], thought_words=self.thought_words)
|
||||
else:
|
||||
raise NotImplementedError("Unexpected role: {}".format(message["role"]))
|
||||
|
||||
@@ -416,8 +416,8 @@ class ReasoningTemplate(Template):
|
||||
|
||||
prompt_ids, response_ids = super().encode_oneturn(tokenizer, messages, system, tools)
|
||||
if (
|
||||
self.thought_words[0] not in messages[-1]["content"]
|
||||
and self.thought_words[1] not in messages[-1]["content"]
|
||||
self.thought_words[0].strip() not in messages[-1]["content"]
|
||||
and self.thought_words[1].strip() not in messages[-1]["content"]
|
||||
): # add empty cot
|
||||
if not self.enable_thinking: # do not compute loss
|
||||
prompt_ids += self.get_thought_word_ids(tokenizer)
|
||||
@@ -442,8 +442,8 @@ class ReasoningTemplate(Template):
|
||||
encoded_messages = self._encode(tokenizer, messages, system, tools)
|
||||
for i in range(0, len(messages), 2):
|
||||
if (
|
||||
self.thought_words[0] not in messages[i + 1]["content"]
|
||||
and self.thought_words[1] not in messages[i + 1]["content"]
|
||||
self.thought_words[0].strip() not in messages[i + 1]["content"]
|
||||
and self.thought_words[1].strip() not in messages[i + 1]["content"]
|
||||
): # add empty cot
|
||||
if not self.enable_thinking: # do not compute loss
|
||||
encoded_messages[i] += self.get_thought_word_ids(tokenizer)
|
||||
@@ -518,7 +518,7 @@ def register_template(
|
||||
format_prefix=format_prefix or default_prefix_formatter,
|
||||
default_system=default_system,
|
||||
stop_words=stop_words or [],
|
||||
thought_words=thought_words or ("<think>", "</think>"),
|
||||
thought_words=thought_words or ("<think>\n", "\n</think>\n\n"),
|
||||
efficient_eos=efficient_eos,
|
||||
replace_eos=replace_eos,
|
||||
replace_jinja_template=replace_jinja_template,
|
||||
@@ -579,7 +579,7 @@ def parse_template(tokenizer: "PreTrainedTokenizer") -> "Template":
|
||||
format_prefix=EmptyFormatter(slots=[prefix]) if prefix else EmptyFormatter(),
|
||||
default_system=default_system,
|
||||
stop_words=[],
|
||||
thought_words=("<think>", "</think>"),
|
||||
thought_words=("<think>\n", "\n</think>\n\n"),
|
||||
efficient_eos=False,
|
||||
replace_eos=False,
|
||||
replace_jinja_template=False,
|
||||
@@ -679,6 +679,23 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="bailing_v2",
|
||||
format_user=StringFormatter(slots=["<role>HUMAN</role>{{content}}<|role_end|><role>ASSISTANT</role>"]),
|
||||
format_system=StringFormatter(slots=["<role>SYSTEM</role>{{content}}<|role_end|>"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|role_end|>"]),
|
||||
format_observation=StringFormatter(
|
||||
slots=[
|
||||
"<role>OBSERVATION</role>\n<tool_response>\n{{content}}\n</tool_response><|role_end|><role>ASSISTANT</role>"
|
||||
]
|
||||
),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|role_end|>"], tool_format="ling"),
|
||||
format_tools=ToolFormatter(tool_format="ling"),
|
||||
stop_words=["<|endoftext|>"],
|
||||
efficient_eos=True,
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="belle",
|
||||
format_user=StringFormatter(slots=["Human: {{content}}\n\nBelle: "]),
|
||||
@@ -894,12 +911,51 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="dots_ocr",
|
||||
format_user=StringFormatter(slots=["<|user|>{{content}}<|endofuser|><|assistant|>"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|endofassistant|>"]),
|
||||
format_system=StringFormatter(slots=["<|system|>{{content}}<|endofsystem|>\n"]),
|
||||
stop_words=["<|endofassistant|>"],
|
||||
efficient_eos=True,
|
||||
mm_plugin=get_mm_plugin(
|
||||
name="qwen2_vl",
|
||||
image_token="<|imgpad|>",
|
||||
video_token="<|vidpad|>",
|
||||
vision_bos_token="<|img|>",
|
||||
vision_eos_token="<|endofimg|>",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="empty",
|
||||
format_assistant=StringFormatter(slots=["{{content}}"]),
|
||||
)
|
||||
|
||||
|
||||
# copied from chatml template
|
||||
register_template(
|
||||
name="ernie",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n\n"]),
|
||||
format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n\n<|im_start|>assistant\n"]),
|
||||
default_system="<global_setting>\nthink_mode=True\n</global_setting>",
|
||||
stop_words=["<|im_end|>"],
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="ernie_nothink",
|
||||
format_user=StringFormatter(slots=["User: {{content}}\nAssistant: "]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|end_of_sentence|>"]),
|
||||
format_system=StringFormatter(slots=["{{content}}\n"]),
|
||||
format_prefix=EmptyFormatter(slots=["<|begin_of_sentence|>"]),
|
||||
stop_words=["<|end_of_sentence|>"],
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="exaone",
|
||||
format_user=StringFormatter(slots=["[|user|]{{content}}\n[|assistant|]"]),
|
||||
@@ -916,6 +972,18 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
# copied from chatml template
|
||||
register_template(
|
||||
name="falcon_h1",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
|
||||
stop_words=["<|im_end|>", "<|end_of_text|>"],
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="fewshot",
|
||||
format_assistant=StringFormatter(slots=["{{content}}\n\n"]),
|
||||
@@ -939,6 +1007,22 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
# copied from gemma template
|
||||
register_template(
|
||||
name="gemma2",
|
||||
format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
|
||||
format_system=StringFormatter(slots=["{{content}}\n\n"]),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
|
||||
),
|
||||
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
|
||||
stop_words=["<eos>", "<end_of_turn>"],
|
||||
efficient_eos=True,
|
||||
template_class=Llama2Template,
|
||||
)
|
||||
|
||||
|
||||
# copied from gemma template
|
||||
register_template(
|
||||
name="gemma3",
|
||||
@@ -956,6 +1040,22 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="gemma3n",
|
||||
format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
|
||||
format_system=StringFormatter(slots=["{{content}}\n\n"]),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
|
||||
),
|
||||
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
|
||||
stop_words=["<end_of_turn>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin("gemma3n", image_token="<image_soft_token>", audio_token="<audio_soft_token>"),
|
||||
template_class=Llama2Template,
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="glm4",
|
||||
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
|
||||
@@ -970,6 +1070,56 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
# copied from glm4 template
|
||||
register_template(
|
||||
name="glm4_moe",
|
||||
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
|
||||
format_assistant=StringFormatter(slots=["\n{{content}}"]),
|
||||
format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4_moe"),
|
||||
format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
|
||||
format_tools=ToolFormatter(tool_format="glm4_moe"),
|
||||
format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
|
||||
stop_words=["<|user|>", "<|observation|>"],
|
||||
efficient_eos=True,
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
# copied from glm4 template
|
||||
register_template(
|
||||
name="glm4v",
|
||||
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
|
||||
format_assistant=StringFormatter(slots=["\n{{content}}"]),
|
||||
format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"),
|
||||
format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
|
||||
format_tools=ToolFormatter(tool_format="glm4"),
|
||||
format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
|
||||
stop_words=["<|user|>", "<|observation|>", "</answer>"],
|
||||
efficient_eos=True,
|
||||
mm_plugin=get_mm_plugin(name="glm4v", image_token="<|image|>", video_token="<|video|>"),
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
# copied from glm4 template
|
||||
register_template(
|
||||
name="glm4v_moe",
|
||||
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
|
||||
format_assistant=StringFormatter(slots=["\n{{content}}"]),
|
||||
format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4_moe"),
|
||||
format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
|
||||
format_tools=ToolFormatter(tool_format="glm4_moe"),
|
||||
format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
|
||||
stop_words=["<|user|>", "<|observation|>", "</answer>"],
|
||||
efficient_eos=True,
|
||||
mm_plugin=get_mm_plugin(name="glm4v", image_token="<|image|>", video_token="<|video|>"),
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
# copied from glm4 template
|
||||
register_template(
|
||||
name="glmz1",
|
||||
@@ -986,6 +1136,18 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="gpt",
|
||||
format_user=StringFormatter(slots=["<|start|>user<|message|>{{content}}<|end|><|start|>assistant"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|end|>"]),
|
||||
format_system=StringFormatter(slots=["<|start|>system<|message|>{{content}}<|end|>"]),
|
||||
default_system="You are ChatGPT, a large language model trained by OpenAI.",
|
||||
thought_words=("<|channel|>analysis<|message|>", "<|end|><|start|>assistant<|channel|>final<|message|>"),
|
||||
efficient_eos=True,
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="granite3",
|
||||
format_user=StringFormatter(
|
||||
@@ -1010,6 +1172,25 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="granite4",
|
||||
format_user=StringFormatter(
|
||||
slots=[
|
||||
"<|start_of_role|>user<|end_of_role|>{{content}}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>"
|
||||
]
|
||||
),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|end_of_text|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|start_of_role|>system<|end_of_role|>{{content}}<|end_of_text|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|end_of_text|>\n"], tool_format="default"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|start_of_role|>tool<|end_of_role|>{{content}}<|end_of_text|>\n<|start_of_role|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="default"),
|
||||
stop_words=["<|end_of_text|>"],
|
||||
default_system="You are Granite, developed by IBM. You are a helpful AI assistant.",
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="index",
|
||||
format_user=StringFormatter(slots=["reserved_0{{content}}reserved_1"]),
|
||||
@@ -1076,6 +1257,35 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="intern_s1",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
|
||||
stop_words=["<|im_end|>"],
|
||||
mm_plugin=get_mm_plugin(name="intern_vl", image_token="<image>", video_token="<video>"),
|
||||
)
|
||||
|
||||
|
||||
# copied from qwen template
|
||||
register_template(
|
||||
name="keye_vl",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="qwen"),
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin(name="qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="kimi_vl",
|
||||
format_user=StringFormatter(
|
||||
@@ -1409,7 +1619,7 @@ register_template(
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
stop_words=["<|im_end|>"],
|
||||
default_system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
|
||||
default_system="You are a helpful assistant. You can accept audio and text input and output voice and text.",
|
||||
mm_plugin=get_mm_plugin(name="minicpm_v", image_token="<image>", video_token="<video>", audio_token="<audio>"),
|
||||
)
|
||||
|
||||
@@ -1608,6 +1818,22 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
# copied from qwen template
|
||||
register_template(
|
||||
name="qwen3_nothink",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="qwen"),
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
)
|
||||
|
||||
|
||||
# copied from chatml template
|
||||
register_template(
|
||||
name="qwen2_audio",
|
||||
@@ -1636,10 +1862,55 @@ register_template(
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin(
|
||||
name="qwen2_omni", audio_token="<|AUDIO|>", image_token="<|IMAGE|>", video_token="<|VIDEO|>"
|
||||
name="qwen2_omni",
|
||||
image_token="<|IMAGE|>",
|
||||
video_token="<|VIDEO|>",
|
||||
audio_token="<|AUDIO|>",
|
||||
vision_bos_token="<|vision_bos|>",
|
||||
vision_eos_token="<|vision_eos|>",
|
||||
audio_bos_token="<|audio_bos|>",
|
||||
audio_eos_token="<|audio_eos|>",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="qwen3_omni",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="qwen"),
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin(
|
||||
name="qwen2_omni", image_token="<|image_pad|>", video_token="<|video_pad|>", audio_token="<|audio_pad|>"
|
||||
),
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="qwen3_omni_nothink",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="qwen"),
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin(
|
||||
name="qwen2_omni", image_token="<|image_pad|>", video_token="<|video_pad|>", audio_token="<|audio_pad|>"
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
# copied from qwen template
|
||||
register_template(
|
||||
name="qwen2_vl",
|
||||
@@ -1658,6 +1929,41 @@ register_template(
|
||||
)
|
||||
|
||||
|
||||
# copied from qwen template
|
||||
register_template(
|
||||
name="qwen3_vl",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="qwen"),
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin(name="qwen3_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
|
||||
template_class=ReasoningTemplate,
|
||||
)
|
||||
|
||||
|
||||
# copied from qwen template
|
||||
register_template(
|
||||
name="qwen3_vl_nothink",
|
||||
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
||||
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
|
||||
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
||||
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
|
||||
format_observation=StringFormatter(
|
||||
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
|
||||
),
|
||||
format_tools=ToolFormatter(tool_format="qwen"),
|
||||
stop_words=["<|im_end|>"],
|
||||
replace_eos=True,
|
||||
mm_plugin=get_mm_plugin(name="qwen3_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
|
||||
)
|
||||
|
||||
|
||||
register_template(
|
||||
name="sailor",
|
||||
    format_user=StringFormatter(slots=["<|im_start|>question\n{{content}}<|im_end|>\n<|im_start|>answer\n"]),

@@ -1685,6 +1991,20 @@ register_template(
)


# copied from seed_coder
register_template(
    name="seed_oss",
    format_user=StringFormatter(
        slots=[{"bos_token"}, "user\n{{content}}", {"eos_token"}, {"bos_token"}, "assistant\n"]
    ),
    format_system=StringFormatter(slots=[{"bos_token"}, "system\n{{content}}", {"eos_token"}]),
    format_function=FunctionFormatter(slots=[{"bos_token"}, "\n{{content}}", {"eos_token"}], tool_format="seed_oss"),
    format_tools=ToolFormatter(tool_format="seed_oss"),
    template_class=ReasoningTemplate,
    thought_words=("<seed:think>", "</seed:think>"),
)


# copied from llama3 template
register_template(
    name="skywork_o1",

@@ -38,8 +38,20 @@ DEFAULT_TOOL_PROMPT = (
)

GLM4_TOOL_PROMPT = (
    "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,"
    "你的任务是针对用户的问题和要求提供适当的答复和支持。# 可用工具{tool_text}"
    "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的,"
    "你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{tool_text}"
)

GLM4_MOE_TOOL_PROMPT = (
    "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
    "You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}"
    "\n</tools>\n\nFor each function call, output the function name and arguments within the following XML format:"
    "\n<tool_call>{{function-name}}"
    "\n<arg_key>{{arg-key-1}}</arg_key>"
    "\n<arg_value>{{arg-value-1}}</arg_value>"
    "\n<arg_key>{{arg-key-2}}</arg_key>"
    "\n<arg_value>{{arg-value-2}}</arg_value>"
    "\n...\n</tool_call>\n"
)

LLAMA3_TOOL_PROMPT = (
@@ -57,6 +69,23 @@ QWEN_TOOL_PROMPT = (
    """"arguments": <args-json-object>}}\n</tool_call>"""
)

SEED_TOOL_PROMPT = (
    "system\nYou are Doubao, a helpful AI assistant. You may call one or more functions to assist with the user query."
    "Tool List:\nYou are authorized to use the following tools (described in JSON Schema format). Before performing "
    "any task, you must decide how to call them based on the descriptions and parameters of these tools.{tool_text}\n"
    "工具调用请遵循如下格式:\n<seed:tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1"
    "</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple "
    "lines</parameter>\n</function>\n</seed:tool_call>\n"
)

LING_TOOL_PROMPT = (
    "# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
    "You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}"
    "\n</tools>\n\nFor each function call, return a json object with function name and arguments within "
    """<tool_call></tool_call> XML tags:\n<tool_call>\n{{"name": <function-name>, """
    """"arguments": <args-json-object>}}\n</tool_call>"""
)


@dataclass
class ToolUtils(ABC):
@@ -303,12 +332,111 @@ class QwenToolUtils(ToolUtils):
        return results


class GLM4MOEToolUtils(QwenToolUtils):
    r"""GLM-4-MOE tool using template."""

    @override
    @staticmethod
    def tool_formatter(tools: list[dict[str, Any]]) -> str:
        tool_text = ""
        for tool in tools:
            wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool}
            tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False)

        return GLM4_MOE_TOOL_PROMPT.format(tool_text=tool_text)

    @override
    @staticmethod
    def function_formatter(functions: list["FunctionCall"]) -> str:
        function_json = [
            {"func_name": name, "func_key_values": json.loads(arguments)} for name, arguments in functions
        ]
        function_texts = []
        for func in function_json:
            prompt = "\n<tool_call>" + func["func_name"]
            for key, value in func["func_key_values"].items():
                prompt += "\n<arg_key>" + key + "</arg_key>"
                if not isinstance(value, str):
                    value = json.dumps(value, ensure_ascii=False)
                prompt += "\n<arg_value>" + value + "</arg_value>"
            function_texts.append(prompt)

        return "\n".join(function_texts)
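
A quick sanity check of the formatter above (a sketch, not part of the diff; the plain tuple stands in for the FunctionCall named tuple these utilities receive, with arguments as a JSON string):

sample = ("get_weather", '{"city": "Beijing", "days": 2}')
print(GLM4MOEToolUtils.function_formatter([sample]))
# Emits (after a leading newline):
# <tool_call>get_weather
# <arg_key>city</arg_key>
# <arg_value>Beijing</arg_value>
# <arg_key>days</arg_key>
# <arg_value>2</arg_value>
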
class SeedToolUtils(ToolUtils):
    r"""Seed tool using template."""

    @override
    @staticmethod
    def tool_formatter(tools: list[dict[str, Any]]) -> str:
        return SEED_TOOL_PROMPT.format(tool_text="\n" + json.dumps(tools, ensure_ascii=False))

    @override
    @staticmethod
    def function_formatter(functions: list["FunctionCall"]) -> str:
        function_json = [
            {"func_name": name, "func_key_values": json.loads(arguments)} for name, arguments in functions
        ]
        function_texts = []
        for func in function_json:
            prompt = "\n<seed:tool_call>\n<function=" + func["func_name"]
            for key, value in func["func_key_values"].items():
                prompt += "\n<parameter=" + key + ">"
                if not isinstance(value, str):
                    value = json.dumps(value, ensure_ascii=False)
                prompt += value + "</parameter>"
            prompt += "\n</function>\n</seed:tool_call>"
            function_texts.append(prompt)

        return "\n".join(function_texts)

    @override
    @staticmethod
    def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
        results = []
        regex = re.compile(
            r"<seed:tool_call>\s*<function=\s*([^\s<]+)\s*(.*?)\s*</function>\s*</seed:tool_call>", re.DOTALL
        )
        for func_name, params_block in re.findall(regex, content):
            args_dict = {}
            param_pattern = re.compile(r"<parameter=(.*?)>(.*?)</parameter>", re.DOTALL)
            for key, raw_value in re.findall(param_pattern, params_block.strip()):
                value = raw_value.strip()
                try:
                    parsed_value = json.loads(value)
                except json.JSONDecodeError:
                    parsed_value = raw_value
                args_dict[key] = parsed_value

            results.append(FunctionCall(func_name.strip(), json.dumps(args_dict, ensure_ascii=False)))

        return results
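
A round-trip sketch for the Seed utilities (assumes FunctionCall unpacks to (name, arguments) with a JSON-string arguments field, as in the code above):

sample = ("get_weather", '{"city": "Beijing"}')
text = SeedToolUtils.function_formatter([sample])
# The extractor's regex tolerates the missing ">" after the function name
# that function_formatter emits, so the pair round-trips cleanly:
parsed = SeedToolUtils.tool_extractor(text)
# -> [FunctionCall("get_weather", '{"city": "Beijing"}')]
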
class LingToolUtils(QwenToolUtils):
    r"""Ling v2 tool using template."""

    @override
    @staticmethod
    def tool_formatter(tools: list[dict[str, Any]]) -> str:
        tool_text = ""
        for tool in tools:
            wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool}
            tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False)

        return LING_TOOL_PROMPT.format(tool_text=tool_text) + "\n" + "detailed thinking off"


TOOLS = {
    "default": DefaultToolUtils(),
    "glm4": GLM4ToolUtils(),
    "llama3": Llama3ToolUtils(),
    "mistral": MistralToolUtils(),
    "qwen": QwenToolUtils(),
    "glm4_moe": GLM4MOEToolUtils(),
    "seed_oss": SeedToolUtils(),
    "ling": LingToolUtils(),
}
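
This registry is what FunctionFormatter/ToolFormatter resolve through the tool_format key used in the template definitions earlier, e.g. (sketch):

utils = TOOLS["seed_oss"]  # matches tool_format="seed_oss" in register_template
system_text = utils.tool_formatter(
    [{"name": "get_weather", "description": "Query the weather", "parameters": {"type": "object", "properties": {}}}]
)
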
@@ -56,13 +56,13 @@ LAYERNORM_NAMES = {"norm", "ln"}

LLAMABOARD_CONFIG = "llamaboard_config.yaml"

METHODS = ["full", "freeze", "lora"]
METHODS = ["full", "freeze", "lora", "oft"]

MOD_SUPPORTED_MODELS = {"bloom", "falcon", "gemma", "llama", "mistral", "mixtral", "phi", "starcoder2"}

MULTIMODAL_SUPPORTED_MODELS = set()

PEFT_METHODS = {"lora"}
PEFT_METHODS = {"lora", "oft"}

RUNNING_LOG = "running_log.txt"

@@ -126,6 +126,7 @@ class QuantizationMethod(str, Enum):
    QUANTO = "quanto"
    EETQ = "eetq"
    HQQ = "hqq"
    MXFP4 = "mxfp4"


class RopeScaling(str, Enum):
@@ -143,7 +144,7 @@ def register_model_group(
    for name, path in models.items():
        SUPPORTED_MODELS[name] = path
        if template is not None and (
            any(suffix in name for suffix in ("-Chat", "-Distill", "-Instruct")) or multimodal
            any(suffix in name for suffix in ("-Chat", "-Distill", "-Instruct", "-Thinking")) or multimodal
        ):
            DEFAULT_TEMPLATE[name] = template
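
A minimal sketch of the widened condition (the helper below is hypothetical): "-Thinking" model names now receive the group's default template, just like "-Chat"/"-Distill"/"-Instruct" ones:

def uses_default_template(name: str, multimodal: bool = False) -> bool:
    return any(suffix in name for suffix in ("-Chat", "-Distill", "-Instruct", "-Thinking")) or multimodal

assert uses_default_template("Qwen3-4B-Thinking")
assert not uses_default_template("Qwen3-30B-A3B-Base")
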
@@ -276,7 +277,7 @@ register_model_group(
register_model_group(
    models={
        "ChatGLM2-6B-Chat": {
            DownloadSource.DEFAULT: "THUDM/chatglm2-6b",
            DownloadSource.DEFAULT: "zai-org/chatglm2-6b",
            DownloadSource.MODELSCOPE: "ZhipuAI/chatglm2-6b",
        }
    },
@@ -287,11 +288,11 @@ register_model_group(
register_model_group(
    models={
        "ChatGLM3-6B-Base": {
            DownloadSource.DEFAULT: "THUDM/chatglm3-6b-base",
            DownloadSource.DEFAULT: "zai-org/chatglm3-6b-base",
            DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b-base",
        },
        "ChatGLM3-6B-Chat": {
            DownloadSource.DEFAULT: "THUDM/chatglm3-6b",
            DownloadSource.DEFAULT: "zai-org/chatglm3-6b",
            DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b",
        },
    },
@@ -333,7 +334,7 @@ register_model_group(
register_model_group(
    models={
        "CodeGeeX4-9B-Chat": {
            DownloadSource.DEFAULT: "THUDM/codegeex4-all-9b",
            DownloadSource.DEFAULT: "zai-org/codegeex4-all-9b",
            DownloadSource.MODELSCOPE: "ZhipuAI/codegeex4-all-9b",
        },
    },
@@ -589,6 +590,59 @@ register_model_group(
)


register_model_group(
    models={
        "Devstral-Small-2507-Instruct": {
            DownloadSource.DEFAULT: "mistralai/Devstral-Small-2507",
            DownloadSource.MODELSCOPE: "mistralai/Devstral-Small-2507",
        },
    },
    template="mistral_small",
)


register_model_group(
    models={
        "dots.ocr": {
            DownloadSource.DEFAULT: "rednote-hilab/dots.ocr",
            DownloadSource.MODELSCOPE: "rednote-hilab/dots.ocr",
        },
    },
    template="dots_ocr",
    multimodal=True,
)


register_model_group(
    models={
        "ERNIE-4.5-21B-A3B-Thinking": {
            DownloadSource.DEFAULT: "baidu/ERNIE-4.5-21B-A3B-Thinking",
            DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-21B-A3B-Thinking",
        },
    },
    template="ernie",
)


register_model_group(
    models={
        "ERNIE-4.5-0.3B-PT": {
            DownloadSource.DEFAULT: "baidu/ERNIE-4.5-0.3B-PT",
            DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-0.3B-PT",
        },
        "ERNIE-4.5-21B-A3B-PT": {
            DownloadSource.DEFAULT: "baidu/ERNIE-4.5-21B-A3B-PT",
            DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-21B-A3B-PT",
        },
        "ERNIE-4.5-300B-A47B-PT": {
            DownloadSource.DEFAULT: "baidu/ERNIE-4.5-300B-A47B-PT",
            DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-300B-A47B-PT",
        },
    },
    template="ernie_nothink",
)


register_model_group(
    models={
        "EXAONE-3.0-7.8B-Instruct": {
@@ -634,6 +688,61 @@ register_model_group(
)


register_model_group(
    models={
        "Falcon-H1-0.5B-Base": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-0.5B-Base",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-0.5B-Base",
        },
        "Falcon-H1-1.5B-Base": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-1.5B-Base",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Base",
        },
        "Falcon-H1-1.5B-Deep-Base": {
DownloadSource.DEFAULT: "tiuae/Falcon-H1-1.5B-Deep-Base",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Deep-Base",
        },
        "Falcon-H1-3B-Base": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-3B-Base",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-3B-Base",
        },
        "Falcon-H1-7B-Base": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-7B-Base",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-7B-Base",
        },
        "Falcon-H1-34B-Base": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-34B-Base",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-34B-Base",
        },
        "Falcon-H1-0.5B-Instruct": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-0.5B-Instruct",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-0.5B-Instruct",
        },
        "Falcon-H1-1.5B-Instruct": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-1.5B-Instruct",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Instruct",
        },
        "Falcon-H1-1.5B-Deep-Instruct": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-1.5B-Deep-Instruct",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Deep-Instruct",
        },
        "Falcon-H1-3B-Instruct": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-3B-Instruct",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-3B-Instruct",
        },
        "Falcon-H1-7B-Instruct": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-7B-Instruct",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-7B-Instruct",
        },
        "Falcon-H1-34B-Instruct": {
            DownloadSource.DEFAULT: "tiiuae/Falcon-H1-34B-Instruct",
            DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-34B-Instruct",
        },
    },
    template="falcon_h1",
)


register_model_group(
    models={
        "Gemma-2B": {
@@ -658,6 +767,13 @@ register_model_group(
        "Gemma-1.1-7B-Instruct": {
            DownloadSource.DEFAULT: "google/gemma-1.1-7b-it",
        },
    },
    template="gemma",
)


register_model_group(
    models={
        "Gemma-2-2B": {
            DownloadSource.DEFAULT: "google/gemma-2-2b",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-2b",
@@ -684,10 +800,18 @@ register_model_group(
            DownloadSource.DEFAULT: "google/gemma-2-27b-it",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-27b-it",
        },
        "Gemma-3-270M": {
            DownloadSource.DEFAULT: "google/gemma-3-270m",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-270m",
        },
        "Gemma-3-1B": {
            DownloadSource.DEFAULT: "google/gemma-3-1b-pt",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-1b-pt",
        },
        "Gemma-3-270M-Instruct": {
            DownloadSource.DEFAULT: "google/gemma-3-270m-it",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-270m-it",
        },
        "Gemma-3-1B-Instruct": {
            DownloadSource.DEFAULT: "google/gemma-3-1b-it",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-1b-it",
@@ -697,7 +821,7 @@ register_model_group(
            DownloadSource.MODELSCOPE: "google/medgemma-27b-text-it",
        },
    },
    template="gemma",
    template="gemma2",
)


@@ -735,37 +859,65 @@ register_model_group(
            DownloadSource.DEFAULT: "google/medgemma-4b-it",
            DownloadSource.MODELSCOPE: "google/medgemma-4b-it",
        },
        "MedGemma-27B-Instruct": {
            DownloadSource.DEFAULT: "google/medgemma-27b-text-it",
            DownloadSource.MODELSCOPE: "google/medgemma-27b-text-it",
        },
    },
    template="gemma3",
    multimodal=True,
)


register_model_group(
    models={
        "Gemma-3n-E2B": {
            DownloadSource.DEFAULT: "google/gemma-3n-E2B",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E2B",
        },
        "Gemma-3n-E4B": {
            DownloadSource.DEFAULT: "google/gemma-3n-E4B",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E4B",
        },
        "Gemma-3n-E2B-Instruct": {
            DownloadSource.DEFAULT: "google/gemma-3n-E2B-it",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E2B-it",
        },
        "Gemma-3n-E4B-Instruct": {
            DownloadSource.DEFAULT: "google/gemma-3n-E4B-it",
            DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E4B-it",
        },
    },
    template="gemma3n",
    multimodal=True,
)


register_model_group(
    models={
        "GLM-4-9B": {
            DownloadSource.DEFAULT: "THUDM/glm-4-9b",
            DownloadSource.DEFAULT: "zai-org/glm-4-9b",
            DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b",
        },
        "GLM-4-9B-Chat": {
            DownloadSource.DEFAULT: "THUDM/glm-4-9b-chat",
            DownloadSource.DEFAULT: "zai-org/glm-4-9b-chat",
            DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat",
            DownloadSource.OPENMIND: "LlamaFactory/glm-4-9b-chat",
        },
        "GLM-4-9B-1M-Chat": {
            DownloadSource.DEFAULT: "THUDM/glm-4-9b-chat-1m",
            DownloadSource.DEFAULT: "zai-org/glm-4-9b-chat-1m",
            DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat-1m",
        },
        "GLM-4-0414-9B-Chat": {
            DownloadSource.DEFAULT: "THUDM/GLM-4-9B-0414",
            DownloadSource.DEFAULT: "zai-org/GLM-4-9B-0414",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4-9B-0414",
        },
        "GLM-4-0414-32B-Base": {
            DownloadSource.DEFAULT: "THUDM/GLM-4-32B-Base-0414",
            DownloadSource.DEFAULT: "zai-org/GLM-4-32B-Base-0414",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4-32B-Base-0414",
        },
        "GLM-4-0414-32B-Chat": {
            DownloadSource.DEFAULT: "THUDM/GLM-4-32B-0414",
            DownloadSource.DEFAULT: "zai-org/GLM-4-32B-0414",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4-32B-0414",
        },
    },
@@ -773,14 +925,65 @@ register_model_group(
)


register_model_group(
    models={
        "GLM-4.1V-9B-Base": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.1V-9B-Base",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.1V-9B-Base",
        },
        "GLM-4.1V-9B-Thinking": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.1V-9B-Thinking",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.1V-9B-Thinking",
        },
    },
    template="glm4v",
    multimodal=True,
)


register_model_group(
    models={
        "GLM-4.5-Air-Base": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.5-Air-Base",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5-Air-Base",
        },
        "GLM-4.5-Base": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.5-Base",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5-Base",
        },
        "GLM-4.5-Air-Thinking": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.5-Air",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5-Air",
        },
        "GLM-4.5-Thinking": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.5",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5",
        },
    },
    template="glm4_moe",
)


register_model_group(
    models={
        "GLM-4.5V-Air-Thinking": {
            DownloadSource.DEFAULT: "zai-org/GLM-4.5V",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5V",
        }
    },
    template="glm4v_moe",
    multimodal=True,
)


register_model_group(
    models={
        "GLM-Z1-0414-9B-Chat": {
            DownloadSource.DEFAULT: "THUDM/GLM-Z1-9B-0414",
            DownloadSource.DEFAULT: "zai-org/GLM-Z1-9B-0414",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-Z1-9B-0414",
        },
        "GLM-Z1-0414-32B-Chat": {
            DownloadSource.DEFAULT: "THUDM/GLM-Z1-32B-0414",
            DownloadSource.DEFAULT: "zai-org/GLM-Z1-32B-0414",
            DownloadSource.MODELSCOPE: "ZhipuAI/GLM-Z1-32B-0414",
        },
    },
@@ -810,6 +1013,21 @@ register_model_group(
)


register_model_group(
    models={
        "GPT-OSS-20B-Thinking": {
            DownloadSource.DEFAULT: "openai/gpt-oss-20b",
            DownloadSource.MODELSCOPE: "openai/gpt-oss-20b",
        },
        "GPT-OSS-120B-Thinking": {
            DownloadSource.DEFAULT: "openai/gpt-oss-120b",
            DownloadSource.MODELSCOPE: "openai/gpt-oss-120b",
        },
    },
    template="gpt",
)


register_model_group(
    models={
        "Granite-3.0-1B-A400M-Base": {
@@ -917,6 +1135,17 @@ register_model_group(
)


register_model_group(
    models={
        "Granite-4.0-tiny-preview": {
            DownloadSource.DEFAULT: "ibm-granite/granite-4.0-tiny-preview",
            DownloadSource.MODELSCOPE: "ibm-granite/granite-4.0-tiny-preview",
        },
    },
    template="granite4",
)


register_model_group(
    models={
        "Hunyuan-7B-Instruct": {
@@ -1073,12 +1302,52 @@ register_model_group(
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3-78B-hf",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3-78B-hf",
        },
        "InternVL3.5-1B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-1B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-1B-HF",
        },
        "InternVL3.5-2B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-2B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-2B-HF",
        },
        "InternVL3.5-4B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-4B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-4B-HF",
        },
        "InternVL3.5-8B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-8B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-8B-HF",
        },
        "InternVL3.5-14B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-14B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-14B-HF",
        },
        "InternVL3.5-30B-A3B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-30B-A3B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-30B-A3B-HF",
        },
        "InternVL3.5-38B-hf": {
            DownloadSource.DEFAULT: "OpenGVLab/InternVL3_5-38B-HF",
            DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3_5-38B-HF",
        },
    },
    template="intern_vl",
    multimodal=True,
)


register_model_group(
    models={
        "Intern-S1-mini": {
            DownloadSource.DEFAULT: "internlm/Intern-S1-mini",
            DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/Intern-S1-mini",
        }
    },
    template="intern_s1",
    multimodal=True,
)


register_model_group(
    models={
        "Jamba-v0.1": {
@@ -1089,6 +1358,29 @@ register_model_group(
)


register_model_group(
    models={
        "Keye-VL-8B-Chat": {
            DownloadSource.DEFAULT: "Kwai-Keye/Keye-VL-8B-Preview",
            DownloadSource.MODELSCOPE: "Kwai-Keye/Keye-VL-8B-Preview",
        },
    },
    template="keye_vl",
    multimodal=True,
)


register_model_group(
    models={
        "Kimi-Dev-72B-Instruct": {
            DownloadSource.DEFAULT: "moonshotai/Kimi-Dev-72B",
            DownloadSource.MODELSCOPE: "moonshotai/Kimi-Dev-72B",
        },
    },
    template="qwen",
)


register_model_group(
    models={
        "Kimi-VL-A3B-Instruct": {
@@ -1099,6 +1391,10 @@ register_model_group(
            DownloadSource.DEFAULT: "moonshotai/Kimi-VL-A3B-Thinking",
            DownloadSource.MODELSCOPE: "moonshotai/Kimi-VL-A3B-Thinking",
        },
        "Kimi-VL-A3B-Thinking-2506": {
            DownloadSource.DEFAULT: "moonshotai/Kimi-VL-A3B-Thinking-2506",
            DownloadSource.MODELSCOPE: "moonshotai/Kimi-VL-A3B-Thinking-2506",
        },
    },
    template="kimi_vl",
    multimodal=True,
@@ -1462,20 +1758,36 @@ register_model_group(

register_model_group(
    models={
        "MiMo-7B-VL-Instruct": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-SFT",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-VL-7B-SFT",
        },
        "MiMo-7B-VL-RL": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-RL",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-VL-7B-RL",
        },
        "MiMo-VL-7B-RL-2508": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-RL-2508",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-VL-7B-RL-2508",
        },
    },
    template="mimo_vl",
    multimodal=True,
)


register_model_group(
    models={
        "MiMo-7B-VL-Instruct": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-SFT",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-VL-7B-SFT",
        },
        "MiMo-VL-7B-SFT-2508": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-SFT-2508",
DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-SFT-2508",
        },
    },
    template="qwen2_vl",
    multimodal=True,
)


register_model_group(
    models={
        "MiniCPM-2B-SFT-Chat": {
@@ -1513,6 +1825,10 @@ register_model_group(
            DownloadSource.DEFAULT: "openbmb/MiniCPM4-8B",
            DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM4-8B",
        },
        "MiniCPM4.1-8B-Chat": {
            DownloadSource.DEFAULT: "openbmb/MiniCPM4.1-8B",
            DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM4.1-8B",
        },
    },
    template="cpm4",
)
@@ -1520,7 +1836,7 @@ register_model_group(

register_model_group(
    models={
        "MiniCPM-o-2_6": {
        "MiniCPM-o-2.6": {
            DownloadSource.DEFAULT: "openbmb/MiniCPM-o-2_6",
            DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM-o-2_6",
        },
@@ -1532,7 +1848,7 @@ register_model_group(

register_model_group(
    models={
        "MiniCPM-V-2_6": {
        "MiniCPM-V-2.6": {
            DownloadSource.DEFAULT: "openbmb/MiniCPM-V-2_6",
            DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM-V-2_6",
        },
@@ -1542,6 +1858,30 @@ register_model_group(
)


register_model_group(
    models={
        "MiniCPM-V-4": {
            DownloadSource.DEFAULT: "openbmb/MiniCPM-V-4",
            DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM-V-4",
        },
    },
    template="minicpm_v",
    multimodal=True,
)


register_model_group(
    models={
        "MiniCPM-V-4.5": {
            DownloadSource.DEFAULT: "openbmb/MiniCPM-V-4_5",
            DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM-V-4_5",
        },
    },
    template="minicpm_v",
    multimodal=True,
)


register_model_group(
    models={
        "Ministral-8B-Instruct-2410": {
@@ -1617,6 +1957,10 @@ register_model_group(
            DownloadSource.DEFAULT: "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
            DownloadSource.MODELSCOPE: "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
        },
        "Mistral-Small-3.2-24B-Instruct": {
            DownloadSource.DEFAULT: "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
            DownloadSource.MODELSCOPE: "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
        },
    },
    template="mistral_small",
    multimodal=True,
@@ -1646,6 +1990,37 @@ register_model_group(
)


register_model_group(
    models={
        "MobileLLM-R1-140M-Base": {
            DownloadSource.DEFAULT: "facebook/MobileLLM-R1-140M-base",
            DownloadSource.MODELSCOPE: "facebook/MobileLLM-R1-140M-base",
        },
        "MobileLLM-R1-360M-Base": {
            DownloadSource.DEFAULT: "facebook/MobileLLM-R1-360M-base",
            DownloadSource.MODELSCOPE: "facebook/MobileLLM-R1-360M-base",
        },
        "MobileLLM-R1-950M-Base": {
            DownloadSource.DEFAULT: "facebook/MobileLLM-R1-950M-base",
            DownloadSource.MODELSCOPE: "facebook/MobileLLM-R1-950M-base",
        },
        "MobileLLM-R1-140M-Instruct": {
            DownloadSource.DEFAULT: "facebook/MobileLLM-R1-140M",
            DownloadSource.MODELSCOPE: "facebook/MobileLLM-R1-140M",
        },
        "MobileLLM-R1-360M-Instruct": {
            DownloadSource.DEFAULT: "facebook/MobileLLM-R1-360M",
            DownloadSource.MODELSCOPE: "facebook/MobileLLM-R1-360M",
        },
        "MobileLLM-R1-950M-Instruct": {
            DownloadSource.DEFAULT: "facebook/MobileLLM-R1-950M",
            DownloadSource.MODELSCOPE: "facebook/MobileLLM-R1-950M",
        },
    },
    template="llama3",
)


register_model_group(
    models={
        "Moonlight-16B-A3B": {
@@ -2538,75 +2913,114 @@ register_model_group(
            DownloadSource.DEFAULT: "Qwen/Qwen3-30B-A3B-Base",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-30B-A3B-Base",
        },
        "Qwen3-0.6B-Instruct": {
        "Qwen3-0.6B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-0.6B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-0.6B",
        },
        "Qwen3-1.7B-Instruct": {
        "Qwen3-1.7B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-1.7B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-1.7B",
        },
        "Qwen3-4B-Instruct": {
        "Qwen3-4B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-4B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-4B",
        },
        "Qwen3-8B-Instruct": {
        "Qwen3-4B-Thinking-2507": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-4B-Thinking-2507",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-4B-Thinking-2507",
        },
        "Qwen3-8B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-8B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-8B",
        },
        "Qwen3-14B-Instruct": {
        "Qwen3-14B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-14B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-14B",
        },
        "Qwen3-32B-Instruct": {
        "Qwen3-32B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-32B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-32B",
        },
        "Qwen3-30B-A3B-Instruct": {
        "Qwen3-30B-A3B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-30B-A3B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-30B-A3B",
        },
        "Qwen3-235B-A22B-Instruct": {
        "Qwen3-30B-A3B-Thinking-2507": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-30B-A3B-Thinking-2507",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-30B-A3B-Thinking-2507",
        },
        "Qwen3-235B-A22B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-235B-A22B",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-235B-A22B",
        },
        "Qwen3-0.6B-Instruct-GPTQ-Int8": {
        "Qwen3-235B-A22B-Thinking-2507": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-235B-A22B-Thinking-2507",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-235B-A22B-Thinking-2507",
        },
        "Qwen3-0.6B-Thinking-GPTQ-Int8": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-0.6B-GPTQ-Int8",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-0.6B-GPTQ-Int8",
        },
        "Qwen3-1.7B-Instruct-GPTQ-Int8": {
        "Qwen3-1.7B-Thinking-GPTQ-Int8": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-1.7B-GPTQ-Int8",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-1.7B-GPTQ-Int8",
        },
        "Qwen3-4B-Instruct-AWQ": {
        "Qwen3-4B-Thinking-AWQ": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-4B-AWQ",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-4B-AWQ",
        },
        "Qwen3-8B-Instruct-AWQ": {
        "Qwen3-8B-Thinking-AWQ": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-8B-AWQ",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-8B-AWQ",
        },
        "Qwen3-14B-Instruct-AWQ": {
        "Qwen3-14B-Thinking-AWQ": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-14B-AWQ",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-14B-AWQ",
        },
        "Qwen3-32B-Instruct-AWQ": {
        "Qwen3-32B-Thinking-AWQ": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-32B-AWQ",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-32B-AWQ",
        },
        "Qwen3-30B-A3B-Instruct-GPTQ-Int4": {
        "Qwen3-30B-A3B-Thinking-GPTQ-Int4": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-30B-A3B-GPTQ-Int4",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-30B-A3B-GPTQ-Int4",
        },
        "Qwen3-235B-A22B-Instruct-GPTQ-Int4": {
        "Qwen3-235B-A22B-Thinking-GPTQ-Int4": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-235B-A22B-GPTQ-Int4",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-235B-A22B-GPTQ-Int4",
        },
"Qwen/Qwen3-Next-80B-A3B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-Next-80B-A3B-Thinking",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-Next-80B-A3B-Thinking",
        },
    },
    template="qwen3",
)


register_model_group(
    models={
        "Qwen3-4B-Instruct-2507": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-4B-Instruct-2507",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-4B-Instruct-2507",
        },
        "Qwen3-30B-A3B-Instruct-2507": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-30B-A3B-Instruct-2507",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-30B-A3B-Instruct-2507",
        },
        "Qwen3-235B-A22B-Instruct-2507": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-235B-A22B-Instruct-2507",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-235B-A22B-Instruct-2507",
        },
        "Qwen3-Next-80B-A3B-Instruct": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-Next-80B-A3B-Instruct",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-Next-80B-A3B-Instruct",
        },
    },
    template="qwen3_nothink",
)


register_model_group(
    models={
        "Qwen2-Audio-7B": {
@@ -2647,6 +3061,34 @@ register_model_group(
)


register_model_group(
    models={
        "Qwen3-Omni-30B-A3B-Captioner": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-Omni-30B-A3B-Captioner",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-Omni-30B-A3B-Captioner",
        },
        "Qwen3-Omni-30B-A3B-Instruct": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-Omni-30B-A3B-Instruct",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-Omni-30B-A3B-Instruct",
        },
    },
    template="qwen3_omni_nothink",
    multimodal=True,
)


register_model_group(
    models={
        "Qwen3-Omni-30B-A3B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-Omni-30B-A3B-Thinking",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-Omni-30B-A3B-Thinking",
        },
    },
    template="qwen3_omni",
    multimodal=True,
)


register_model_group(
    models={
        "Qwen2-VL-2B": {
@@ -2749,22 +3191,68 @@ register_model_group(
)


register_model_group(
    models={
        "Qwen3-VL-235B-A22B-Instruct": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-VL-235B-A22B-Instruct",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-VL-235B-A22B-Instruct",
        },
    },
    template="qwen3_vl_nothink",
    multimodal=True,
)


register_model_group(
    models={
        "Qwen3-VL-235B-A22B-Thinking": {
            DownloadSource.DEFAULT: "Qwen/Qwen3-VL-235B-A22B-Thinking",
            DownloadSource.MODELSCOPE: "Qwen/Qwen3-VL-235B-A22B-Thinking",
        },
    },
    template="qwen3_vl",
    multimodal=True,
)


register_model_group(
    models={
        "Seed-Coder-8B-Base": {
            DownloadSource.DEFAULT: "ByteDance-Seed/Seed-Coder-8B-Base",
            DownloadSource.MODELSCOPE: "ByteDance-Seed/Seed-Coder-8B-Base",
        },
        "Seed-Coder-8B-Instruct": {
            DownloadSource.DEFAULT: "ByteDance-Seed/Seed-Coder-8B-Instruct",
            DownloadSource.MODELSCOPE: "ByteDance-Seed/Seed-Coder-8B-Instruct",
        },
        "Seed-Coder-8B-Instruct-Reasoning": {
        "Seed-Coder-8B-Thinking": {
            DownloadSource.DEFAULT: "ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16",
            DownloadSource.MODELSCOPE: "ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16",
        },
    },
    template="seed_coder",
)


register_model_group(
    models={
        "Seed-OSS-36B-Base": {
            DownloadSource.DEFAULT: "ByteDance-Seed/Seed-OSS-36B-Base",
            DownloadSource.MODELSCOPE: "ByteDance-Seed/Seed-OSS-36B-Base",
        },
        "Seed-OSS-36B-Base-woSyn": {
            DownloadSource.DEFAULT: "ByteDance-Seed/Seed-OSS-36B-Base-woSyn",
            DownloadSource.MODELSCOPE: "ByteDance-Seed/Seed-OSS-36B-Base-woSyn",
        },
        "Seed-OSS-36B-Instruct": {
            DownloadSource.DEFAULT: "ByteDance-Seed/Seed-OSS-36B-Instruct",
            DownloadSource.MODELSCOPE: "ByteDance-Seed/Seed-OSS-36B-Instruct",
        },
    },
    template="seed_oss",
)


register_model_group(
    models={
        "Skywork-13B-Base": {

@@ -27,7 +27,7 @@ import trl
from transformers.utils import is_torch_cuda_available, is_torch_npu_available


VERSION = "0.9.3"
VERSION = "0.9.4.dev0"


def print_env() -> None:

@@ -50,7 +50,7 @@ class LoggerHandler(logging.Handler):

    def _write_log(self, log_entry: str) -> None:
        with open(self.running_log, "a", encoding="utf-8") as f:
            f.write(log_entry + "\n\n")
            f.write(log_entry + "\n")

    def emit(self, record) -> None:
        if record.name == "httpx":

@@ -18,11 +18,12 @@
import gc
import os
import socket
from typing import TYPE_CHECKING, Any, Literal, Union
from typing import TYPE_CHECKING, Any, Literal, Optional, Union

import torch
import torch.distributed as dist
import transformers.dynamic_module_utils
from huggingface_hub.utils import WeakFileLock
from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList
from transformers.dynamic_module_utils import get_relative_imports
from transformers.utils import (
@@ -35,7 +36,6 @@ from transformers.utils import (
from transformers.utils.versions import require_version

from . import logging
from .packages import is_transformers_version_greater_than


_is_fp16_available = is_torch_npu_available() or is_torch_cuda_available()
@@ -94,15 +94,11 @@ def check_version(requirement: str, mandatory: bool = False) -> None:

def check_dependencies() -> None:
    r"""Check the version of the required packages."""
    check_version(
        "transformers>=4.45.0,<=4.52.4,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0,!=4.52.0"
    )
    check_version("datasets>=2.16.0,<=3.6.0")
    check_version("accelerate>=0.34.0,<=1.7.0")
    check_version("peft>=0.14.0,<=0.15.2")
    check_version("transformers>=4.49.0,<=4.56.2")
    check_version("datasets>=2.16.0,<=4.0.0")
    check_version("accelerate>=1.3.0,<=1.10.1")
    check_version("peft>=0.14.0,<=0.17.1")
    check_version("trl>=0.8.6,<=0.9.6")
    if is_transformers_version_greater_than("4.46.0") and not is_transformers_version_greater_than("4.48.1"):
        logger.warning_rank0_once("There are known bugs in transformers v4.46.0-v4.48.0, please use other versions.")


def calculate_tps(dataset: list[dict[str, Any]], metrics: dict[str, float], stage: Literal["sft", "rm"]) -> float:
@@ -182,8 +178,22 @@ def get_logits_processor() -> "LogitsProcessorList":
    return logits_processor


def get_current_memory() -> tuple[int, int]:
    r"""Get the available and total memory for the current device (in Bytes)."""
    if is_torch_xpu_available():
        return torch.xpu.mem_get_info()
    elif is_torch_npu_available():
        return torch.npu.mem_get_info()
    elif is_torch_mps_available():
        return torch.mps.current_allocated_memory(), torch.mps.recommended_max_memory()
    elif is_torch_cuda_available():
        return torch.cuda.mem_get_info()
    else:
        return 0, -1
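
Usage sketch for the new helper (on CUDA/NPU/XPU it returns (free, total); on MPS the first element is currently-allocated memory; elsewhere the (0, -1) sentinel):

free_bytes, total_bytes = get_current_memory()
if total_bytes > 0:
    print(f"memory: {free_bytes / 1024**3:.1f} / {total_bytes / 1024**3:.1f} GiB")
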
def get_peak_memory() -> tuple[int, int]:
    r"""Get the peak memory usage for the current device (in Bytes)."""
    r"""Get the peak memory usage (allocated, reserved) for the current device (in Bytes)."""
    if is_torch_xpu_available():
        return torch.xpu.max_memory_allocated(), torch.xpu.max_memory_reserved()
    elif is_torch_npu_available():
@@ -193,7 +203,7 @@ def get_peak_memory() -> tuple[int, int]:
    elif is_torch_cuda_available():
        return torch.cuda.max_memory_allocated(), torch.cuda.max_memory_reserved()
    else:
        return 0, 0
        return 0, -1


def has_tokenized_data(path: "os.PathLike") -> bool:
@@ -201,9 +211,9 @@ def has_tokenized_data(path: "os.PathLike") -> bool:
    return os.path.isdir(path) and len(os.listdir(path)) > 0


def infer_optim_dtype(model_dtype: "torch.dtype") -> "torch.dtype":
def infer_optim_dtype(model_dtype: Optional["torch.dtype"]) -> "torch.dtype":
    r"""Infer the optimal dtype according to the model_dtype and device compatibility."""
    if _is_bf16_available and model_dtype == torch.bfloat16:
    if _is_bf16_available and (model_dtype == torch.bfloat16 or model_dtype is None):
        return torch.bfloat16
    elif _is_fp16_available:
        return torch.float16
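
The Optional signature matters for configs that ship no torch_dtype: None now resolves to bf16 on bf16-capable devices instead of falling through to fp16. Sketch:

dtype = infer_optim_dtype(None)  # torch.bfloat16 on bf16-capable hardware, else fp16/fp32
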
@@ -259,25 +269,36 @@ def try_download_model_from_other_hub(model_args: "ModelArguments") -> str:
        return model_args.model_name_or_path

    if use_modelscope():
        check_version("modelscope>=1.11.0", mandatory=True)
        check_version("modelscope>=1.14.0", mandatory=True)
        from modelscope import snapshot_download  # type: ignore
        from modelscope.hub.api import HubApi  # type: ignore

        if model_args.ms_hub_token:
            api = HubApi()
            api.login(model_args.ms_hub_token)

        revision = "master" if model_args.model_revision == "main" else model_args.model_revision
        return snapshot_download(
            model_args.model_name_or_path,
            revision=revision,
            cache_dir=model_args.cache_dir,
        )
        with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/modelscope.lock"))):
            model_path = snapshot_download(
                model_args.model_name_or_path,
                revision=revision,
                cache_dir=model_args.cache_dir,
            )

        return model_path

    if use_openmind():
        check_version("openmind>=0.8.0", mandatory=True)
        from openmind.utils.hub import snapshot_download  # type: ignore

        return snapshot_download(
            model_args.model_name_or_path,
            revision=model_args.model_revision,
            cache_dir=model_args.cache_dir,
        )
        with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/openmind.lock"))):
            model_path = snapshot_download(
                model_args.model_name_or_path,
                revision=model_args.model_revision,
                cache_dir=model_args.cache_dir,
            )

        return model_path
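
The WeakFileLock above serializes hub downloads across local processes (e.g. several DDP workers booting at once). The same pattern in isolation (demo.lock is a hypothetical path):

import os

from huggingface_hub.utils import WeakFileLock

lock_file = os.path.abspath(os.path.expanduser("~/.cache/llamafactory/demo.lock"))
os.makedirs(os.path.dirname(lock_file), exist_ok=True)
with WeakFileLock(lock_file):
    pass  # only one local process enters the critical section at a time
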
def use_modelscope() -> bool:
@@ -305,5 +326,5 @@ def fix_proxy(ipv6_enabled: bool = False) -> None:
    r"""Fix proxy settings for gradio ui."""
    os.environ["no_proxy"] = "localhost,127.0.0.1,0.0.0.0"
    if ipv6_enabled:
        for name in ("http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY"):
            os.environ.pop(name, None)
        os.environ.pop("http_proxy", None)
        os.environ.pop("HTTP_PROXY", None)

@@ -58,6 +58,10 @@ def is_apollo_available():
    return _is_package_available("apollo_torch")


def is_jieba_available():
    return _is_package_available("jieba")


def is_gradio_available():
    return _is_package_available("gradio")


@@ -122,6 +122,48 @@ class LoraArguments:
    )


@dataclass
class OFTArguments:
    r"""Arguments pertaining to the OFT training."""

    additional_target: Optional[str] = field(
        default=None,
        metadata={
            "help": (
"Name(s) of modules apart from LoRA layers to be set as trainable "
                "and saved in the final checkpoint. "
                "Use commas to separate multiple modules."
            )
        },
    )
    module_dropout: float = field(
        default=0.0,
        metadata={"help": "Dropout rate for the OFT fine-tuning."},
    )
    oft_rank: int = field(
        default=0,
        metadata={"help": "The intrinsic dimension for OFT fine-tuning."},
    )
    oft_block_size: int = field(
        default=32,
metadata={"help": "The intrinsic dimension for OFT fine-tuning."},
    )
    oft_target: str = field(
        default="all",
        metadata={
            "help": (
                "Name(s) of target modules to apply OFT. "
                "Use commas to separate multiple modules. "
                "Use `all` to specify all the linear modules."
            )
        },
    )
    create_new_adapter: bool = field(
        default=False,
        metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."},
    )


@dataclass
class RLHFArguments:
    r"""Arguments pertaining to the PPO, DPO and KTO training."""
@@ -134,6 +176,10 @@ class RLHFArguments:
        default=0.0,
        metadata={"help": "The supervised fine-tuning loss coefficient in DPO training."},
    )
    pref_bco_weight: float = field(
        default=0.0,
        metadata={"help": "The Binary Classifier Optimization coefficient in DPO training."},
    )
    pref_loss: Literal["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"] = field(
        default="sigmoid",
        metadata={"help": "The type of DPO loss to use."},
@@ -396,7 +442,7 @@ class SwanLabArguments:

@dataclass
class FinetuningArguments(
    SwanLabArguments, BAdamArgument, ApolloArguments, GaloreArguments, RLHFArguments, LoraArguments, FreezeArguments
    SwanLabArguments,
    BAdamArgument,
    ApolloArguments,
    GaloreArguments,
    RLHFArguments,
    LoraArguments,
    OFTArguments,
    FreezeArguments,
):
    r"""Arguments pertaining to which techniques we are going to fine-tune with."""

@@ -424,6 +477,10 @@ class FinetuningArguments(
        default=False,
        metadata={"help": "Whether or not to use the Muon optimizer."},
    )
    use_dft_loss: bool = field(
        default=False,
        metadata={"help": "Whether to use the DFT loss."},
    )
    freeze_vision_tower: bool = field(
        default=True,
metadata={"help": "Whether ot not to freeze the vision tower in MLLM training."},
@@ -467,12 +524,13 @@ class FinetuningArguments(
        self.freeze_extra_modules: Optional[list[str]] = split_arg(self.freeze_extra_modules)
        self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2
        self.lora_target: list[str] = split_arg(self.lora_target)
        self.oft_target: list[str] = split_arg(self.oft_target)
        self.additional_target: Optional[list[str]] = split_arg(self.additional_target)
        self.galore_target: list[str] = split_arg(self.galore_target)
        self.apollo_target: list[str] = split_arg(self.apollo_target)
        self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]

        assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
        assert self.finetuning_type in ["lora", "oft", "freeze", "full"], "Invalid fine-tuning method."
        assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
        assert self.reward_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."

@@ -482,6 +540,9 @@ class FinetuningArguments(
        if self.stage == "ppo" and self.reward_model_type == "lora" and self.finetuning_type != "lora":
            raise ValueError("`reward_model_type` cannot be lora for Freeze/Full PPO training.")

        if self.stage == "ppo" and self.reward_model_type == "oft" and self.finetuning_type != "oft":
            raise ValueError("`reward_model_type` cannot be oft for Freeze/Full PPO training.")

        if self.stage == "dpo" and self.pref_loss != "sigmoid" and self.dpo_label_smoothing > 1e-6:
            raise ValueError("`dpo_label_smoothing` is only valid for sigmoid loss function.")


@@ -213,6 +213,23 @@ class QuantizationArguments:
        default=None,
        metadata={"help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0."},
    )
    fp8: bool = field(
        default=False,
        metadata={
            "help": "Enable FP8 mixed precision training via HuggingFace Accelerate. "
            "Requires PyTorch 2.7+ and Hopper architecture GPUs."
        },
    )
    fp8_backend: str = field(
        default="auto",
        metadata={
            "help": "FP8 backend to use ('auto', 'torchao', 'te', 'msamp'). 'auto' selects best available backend."
        },
    )
    fp8_enable_fsdp_float8_all_gather: bool = field(
        default=False,
        metadata={"help": "Enable FP8 optimizations for FSDP2 all-gather operations."},
    )


@dataclass

@@ -15,7 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import sys
from pathlib import Path
@@ -23,7 +22,6 @@ from typing import Any, Optional, Union

import torch
import transformers
import yaml
from omegaconf import OmegaConf
from transformers import HfArgumentParser
from transformers.integrations import is_deepspeed_zero3_enabled
@@ -34,6 +32,7 @@ from transformers.utils import is_torch_bf16_gpu_available, is_torch_npu_available
from ..extras import logging
from ..extras.constants import CHECKPOINT_NAMES, EngineName
from ..extras.misc import check_dependencies, check_version, get_current_device, is_env_enabled
from ..extras.packages import is_transformers_version_greater_than
from .data_args import DataArguments
from .evaluation_args import EvaluationArguments
from .finetuning_args import FinetuningArguments
@@ -62,11 +61,11 @@ def read_args(args: Optional[Union[dict[str, Any], list[str]]] = None) -> Union[

    if sys.argv[1].endswith(".yaml") or sys.argv[1].endswith(".yml"):
        override_config = OmegaConf.from_cli(sys.argv[2:])
        dict_config = yaml.safe_load(Path(sys.argv[1]).absolute().read_text())
        dict_config = OmegaConf.load(Path(sys.argv[1]).absolute())
        return OmegaConf.to_container(OmegaConf.merge(dict_config, override_config))
    elif sys.argv[1].endswith(".json"):
        override_config = OmegaConf.from_cli(sys.argv[2:])
        dict_config = json.loads(Path(sys.argv[1]).absolute().read_text())
        dict_config = OmegaConf.load(Path(sys.argv[1]).absolute())
        return OmegaConf.to_container(OmegaConf.merge(dict_config, override_config))
    else:
        return sys.argv[1:]
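
Loading both YAML and JSON through OmegaConf.load keeps one code path and preserves the existing dotlist overrides from the CLI. Behavior sketch (self-contained, using an in-memory config instead of a file):

from omegaconf import OmegaConf

base = OmegaConf.create({"learning_rate": 2.0e-5, "stage": "sft"})
override = OmegaConf.from_cli(["learning_rate=1e-5"])
print(OmegaConf.to_container(OmegaConf.merge(base, override)))
# -> {'learning_rate': 1e-05, 'stage': 'sft'}
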
@@ -113,8 +112,8 @@ def _verify_model_args(
        raise ValueError("Adapter is only valid for the LoRA method.")

    if model_args.quantization_bit is not None:
        if finetuning_args.finetuning_type != "lora":
            raise ValueError("Quantization is only compatible with the LoRA method.")
        if finetuning_args.finetuning_type not in ["lora", "oft"]:
            raise ValueError("Quantization is only compatible with the LoRA or OFT method.")

        if finetuning_args.pissa_init:
            raise ValueError("Please use scripts/pissa_init.py to initialize PiSSA for a quantized model.")
@@ -132,6 +131,14 @@ def _verify_model_args(
        logger.warning_rank0("We should use slow tokenizer for the Yi models. Change `use_fast_tokenizer` to False.")
        model_args.use_fast_tokenizer = False

    # Validate advanced training features
    if model_args.fp8 and model_args.quantization_bit is not None:
        raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.")

    if model_args.fp8_enable_fsdp_float8_all_gather and not model_args.fp8:
        logger.warning_rank0("fp8_enable_fsdp_float8_all_gather requires fp8=True. Setting fp8=True.")
        model_args.fp8 = True


def _check_extra_dependencies(
    model_args: "ModelArguments",
@@ -148,7 +155,7 @@ def _check_extra_dependencies(
        check_version("mixture-of-depth>=1.1.6", mandatory=True)

    if model_args.infer_backend == EngineName.VLLM:
        check_version("vllm>=0.4.3,<=0.8.6")
        check_version("vllm>=0.4.3,<=0.10.2")
        check_version("vllm", mandatory=True)
    elif model_args.infer_backend == EngineName.SGLANG:
        check_version("sglang>=0.4.5")
@@ -166,13 +173,17 @@ def _check_extra_dependencies(
    if finetuning_args.use_adam_mini:
        check_version("adam-mini", mandatory=True)

    if finetuning_args.use_swanlab:
        check_version("swanlab", mandatory=True)

    if finetuning_args.plot_loss:
        check_version("matplotlib", mandatory=True)

    if training_args is not None:
        if training_args.deepspeed:
            # pin deepspeed version < 0.17 because of https://github.com/deepspeedai/DeepSpeed/issues/7347
            check_version("deepspeed>=0.10.0,<=0.16.9", mandatory=True)
            check_version("deepspeed", mandatory=True)
            check_version("deepspeed>=0.10.0,<=0.16.9")

        if training_args.predict_with_generate:
            check_version("jieba", mandatory=True)
@@ -303,6 +314,9 @@ def get_train_args(args: Optional[Union[dict[str, Any], list[str]]] = None) -> _
    if model_args.use_unsloth and is_deepspeed_zero3_enabled():
        raise ValueError("Unsloth is incompatible with DeepSpeed ZeRO-3.")

    if data_args.neat_packing and is_transformers_version_greater_than("4.53.0"):
        raise ValueError("Neat packing is incompatible with transformers>=4.53.0.")

    _set_env_vars()
    _verify_model_args(model_args, data_args, finetuning_args)
    _check_extra_dependencies(model_args, finetuning_args, training_args)
@@ -348,6 +362,9 @@ def get_train_args(args: Optional[Union[dict[str, Any], list[str]]] = None) -> _
    # https://github.com/huggingface/transformers/blob/v4.50.0/src/transformers/trainer.py#L782
    training_args.label_names = training_args.label_names or ["labels"]

    if "swanlab" in training_args.report_to and finetuning_args.use_swanlab:
        training_args.report_to.remove("swanlab")

    if (
        training_args.parallel_mode == ParallelMode.DISTRIBUTED
        and training_args.ddp_find_unused_parameters is None

@@ -50,7 +50,7 @@ class RayArguments:
        default="PACK",
        metadata={"help": "The placement strategy for Ray training. Default is PACK."},
    )
    ray_init_kwargs: Optional[dict] = field(
    ray_init_kwargs: Optional[Union[dict, str]] = field(
        default=None,
        metadata={"help": "The arguments to pass to ray.init for Ray training. Default is None."},
    )
@@ -59,10 +59,14 @@ class RayArguments:
        self.use_ray = use_ray()
        if isinstance(self.resources_per_worker, str) and self.resources_per_worker.startswith("{"):
            self.resources_per_worker = _convert_str_dict(json.loads(self.resources_per_worker))

        if isinstance(self.ray_init_kwargs, str) and self.ray_init_kwargs.startswith("{"):
            self.ray_init_kwargs = _convert_str_dict(json.loads(self.ray_init_kwargs))

        if self.ray_storage_filesystem is not None:
            if self.ray_storage_filesystem not in ["s3", "gs", "gcs"]:
                raise ValueError(
                    f"ray_storage_filesystem must be one of ['s3', 'gs', 'gcs'], got {self.ray_storage_filesystem}"
                    f"ray_storage_filesystem must be one of ['s3', 'gs', 'gcs'], got {self.ray_storage_filesystem}."
                )

            import pyarrow.fs as fs
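
With the widened Union type, ray_init_kwargs may arrive as a JSON string (e.g. from a CLI flag) and is parsed in __post_init__. Sketch (assumes the remaining RayArguments fields keep their defaults):

args = RayArguments(ray_init_kwargs='{"num_cpus": 8, "ignore_reinit_error": true}')
# after __post_init__: args.ray_init_kwargs == {"num_cpus": 8, "ignore_reinit_error": True}
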
@@ -16,7 +16,7 @@ import re
from typing import TYPE_CHECKING

import torch
from peft import LoraConfig, LoraModel, PeftModel, TaskType, get_peft_model
from peft import LoraConfig, LoraModel, OFTConfig, PeftModel, TaskType, get_peft_model
from transformers.integrations import is_deepspeed_zero3_enabled

from ..extras import logging
@@ -147,7 +147,10 @@ def _setup_lora_tuning(
    cast_trainable_params_to_fp32: bool,
) -> "PeftModel":
    if is_trainable:
        logger.info_rank0("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA"))
        if finetuning_args.finetuning_type == "oft":
            logger.info_rank0("Fine-tuning method: OFT")
        else:
            logger.info_rank0("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA"))

    adapter_to_resume = None

@@ -188,7 +191,7 @@ def _setup_lora_tuning(

    if adapter_to_resume is not None:  # resume lora training
        if model_args.use_unsloth:
            model = load_unsloth_peft_model(config, model_args, is_trainable=is_trainable)
            model = load_unsloth_peft_model(config, model_args, finetuning_args, is_trainable=is_trainable)
        else:
            model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable, **init_kwargs)

@@ -223,17 +226,29 @@ def _setup_lora_tuning(
            finetuning_args.additional_target = module_names
            logger.warning_rank0("Vocab has been resized, add {} to trainable params.".format(",".join(module_names)))

        peft_kwargs = {
            "r": finetuning_args.lora_rank,
            "target_modules": target_modules,
            "lora_alpha": finetuning_args.lora_alpha,
            "lora_dropout": finetuning_args.lora_dropout,
            "use_rslora": finetuning_args.use_rslora,
            "use_dora": finetuning_args.use_dora,
            "modules_to_save": finetuning_args.additional_target,
        }
        if finetuning_args.finetuning_type == "lora":
            peft_kwargs = {
                "r": finetuning_args.lora_rank,
                "target_modules": target_modules,
                "lora_alpha": finetuning_args.lora_alpha,
                "lora_dropout": finetuning_args.lora_dropout,
                "use_rslora": finetuning_args.use_rslora,
                "use_dora": finetuning_args.use_dora,
                "modules_to_save": finetuning_args.additional_target,
            }
        elif finetuning_args.finetuning_type == "oft":
            peft_kwargs = {
                "r": finetuning_args.oft_rank,
                "oft_block_size": finetuning_args.oft_block_size,
                "target_modules": target_modules,
                "module_dropout": finetuning_args.module_dropout,
                "modules_to_save": finetuning_args.additional_target,
            }

        if model_args.use_unsloth:
            if finetuning_args.finetuning_type == "oft":
                raise ValueError("Unsloth is currently not supported for OFT.")

            model = get_unsloth_peft_model(model, model_args, peft_kwargs)
        else:
            if finetuning_args.pissa_init:
@@ -244,12 +259,19 @@ def _setup_lora_tuning(
                logger.info_rank0(f"Using PiSSA initialization with FSVD steps {finetuning_args.pissa_iter}.")
                peft_kwargs["init_lora_weights"] = f"pissa_niter_{finetuning_args.pissa_iter}"

            lora_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                inference_mode=False,
                **peft_kwargs,
            )
            model = get_peft_model(model, lora_config)
            if finetuning_args.finetuning_type == "lora":
                peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    **peft_kwargs,
                )
            elif finetuning_args.finetuning_type == "oft":
                peft_config = OFTConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    **peft_kwargs,
                )
            model = get_peft_model(model, peft_config)
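
For reference, the OFT branch amounts to a peft OFTConfig like this sketch (values are the defaults from OFTArguments above; the target_modules list is illustrative):

from peft import OFTConfig, TaskType

oft_config = OFTConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    r=0,  # oft_rank: 0 lets oft_block_size determine the block structure
    oft_block_size=32,  # oft_block_size default
    module_dropout=0.0,
    target_modules=["q_proj", "v_proj"],  # resolved from oft_target="all" in practice
)
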
if is_trainable and cast_trainable_params_to_fp32:
|
||||
for param in filter(lambda p: p.requires_grad, model.parameters()):
|
||||
@@ -272,8 +294,8 @@ def init_adapter(
|
||||
Note that the trainable parameters must be cast to float32.
|
||||
"""
|
||||
if is_trainable and getattr(model, "quantization_method", None) is not None:
|
||||
if finetuning_args.finetuning_type != "lora":
|
||||
raise ValueError("Quantized models can only be used for the LoRA tuning.")
|
||||
if finetuning_args.finetuning_type not in ["lora", "oft"]:
|
||||
raise ValueError("Quantized models can only be used for the LoRA or OFT tuning.")
|
||||
|
||||
if finetuning_args.pissa_init:
|
||||
raise ValueError("Cannot initialize PiSSA adapter on quantized models.")
|
||||
@@ -296,7 +318,7 @@ def init_adapter(
|
||||
_setup_full_tuning(model, finetuning_args, is_trainable, cast_trainable_params_to_fp32)
|
||||
elif finetuning_args.finetuning_type == "freeze":
|
||||
_setup_freeze_tuning(model, finetuning_args, is_trainable, cast_trainable_params_to_fp32)
|
||||
elif finetuning_args.finetuning_type == "lora":
|
||||
elif finetuning_args.finetuning_type in ["lora", "oft"]:
|
||||
model = _setup_lora_tuning(
|
||||
config, model, model_args, finetuning_args, is_trainable, cast_trainable_params_to_fp32
|
||||
)
|
||||
|
||||
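The hunks above route both `lora` and `oft` through the same `_setup_lora_tuning` path, swapping only the PEFT config class. A minimal sketch of that dispatch, assuming only PEFT's public `LoraConfig`/`OFTConfig`; the ranks and target modules here are illustrative, not the repo's defaults:

```python
# Sketch of the LoRA/OFT config dispatch, assuming `peft` is installed.
from peft import LoraConfig, OFTConfig, TaskType, get_peft_model

def build_peft_config(finetuning_type: str, target_modules: list[str]):
    if finetuning_type == "lora":
        # low-rank additive update W + BA
        return LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=16, target_modules=target_modules)
    elif finetuning_type == "oft":
        # OFT learns orthogonal rotations of the frozen weights instead of low-rank deltas
        return OFTConfig(task_type=TaskType.CAUSAL_LM, r=8, target_modules=target_modules)
    raise ValueError(f"unknown finetuning type: {finetuning_type}")

# model = get_peft_model(model, build_peft_config("oft", ["q_proj", "v_proj"]))
```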
@@ -19,6 +19,7 @@ import torch
 from transformers import (
     AutoConfig,
     AutoModelForCausalLM,
+    AutoModelForImageTextToText,
     AutoModelForSeq2SeqLM,
     AutoModelForTextToWaveform,
     AutoModelForVision2Seq,
@@ -29,7 +30,6 @@ from trl import AutoModelForCausalLMWithValueHead

 from ..extras import logging
 from ..extras.misc import count_parameters, skip_check_imports, try_download_model_from_other_hub
-from ..extras.packages import is_transformers_version_greater_than
 from .adapter import init_adapter
 from .model_utils.liger_kernel import apply_liger_kernel
 from .model_utils.misc import register_autoclass
@@ -39,10 +39,6 @@ from .model_utils.valuehead import load_valuehead_params
 from .patcher import patch_config, patch_model, patch_processor, patch_tokenizer, patch_valuehead_model


-if is_transformers_version_greater_than("4.46.0"):
-    from transformers import AutoModelForImageTextToText
-
-
 if TYPE_CHECKING:
     from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer, ProcessorMixin

@@ -111,9 +107,8 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
             **init_kwargs,
         )
     except Exception as e:
-        raise OSError("Failed to load processor.") from e
-
-    patch_processor(processor, tokenizer, model_args)
+        logger.info_rank0(f"Failed to load processor: {e}.")
+        processor = None

     # Avoid load tokenizer, see:
     # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/auto/processing_auto.py#L324
@@ -121,6 +116,9 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
         logger.debug("The loaded processor is not an instance of Processor. Dropping it.")
         processor = None

+    if processor is not None:
+        patch_processor(processor, tokenizer, model_args)
+
     return {"tokenizer": tokenizer, "processor": processor}

@@ -158,16 +156,13 @@ def load_model(
     if model_args.mixture_of_depths == "load":
         model = load_mod_pretrained_model(**init_kwargs)
     else:
-        if type(config) in AutoModelForVision2Seq._model_mapping.keys():  # image-text
-            load_class = AutoModelForVision2Seq
-        elif (
-            is_transformers_version_greater_than("4.46.0")
-            and type(config) in AutoModelForImageTextToText._model_mapping.keys()
-        ):  # image-text
+        if type(config) in AutoModelForImageTextToText._model_mapping.keys():  # image-text
             load_class = AutoModelForImageTextToText
+        elif type(config) in AutoModelForVision2Seq._model_mapping.keys():  # image-text
+            load_class = AutoModelForVision2Seq
         elif type(config) in AutoModelForSeq2SeqLM._model_mapping.keys():  # audio-text
             load_class = AutoModelForSeq2SeqLM
-        elif type(config) in AutoModelForTextToWaveform._model_mapping.keys():  # audio hack for qwen2_5_omni
+        elif type(config) in AutoModelForTextToWaveform._model_mapping.keys():  # audio hack for qwen omni
             load_class = AutoModelForTextToWaveform
         else:
             load_class = AutoModelForCausalLM
@@ -176,8 +171,8 @@ def load_model(
             model = load_class.from_config(config, trust_remote_code=model_args.trust_remote_code)
         else:
             model = load_class.from_pretrained(**init_kwargs)
-            if getattr(model.config, "model_type", None) == "qwen2_5_omni":
-                model = model.thinker  # use part of Omni model
+            if getattr(model.config, "model_type", None) in ["qwen2_5_omni", "qwen3_omni_moe"]:
+                model = getattr(model, "thinker")

     if model_args.mixture_of_depths == "convert":
         model = convert_pretrained_model_to_mod(model, config, model_args)
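The loader now tests the most specific `Auto*` class first. A sketch of the same pattern in isolation — note that `_model_mapping` is private transformers API, used here exactly as the source does; the model id is illustrative:

```python
# Pick the most specific Auto* loader by checking each class's config-type mapping.
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoModelForImageTextToText,
    AutoModelForVision2Seq,
)

config = AutoConfig.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")  # illustrative checkpoint
if type(config) in AutoModelForImageTextToText._model_mapping.keys():
    load_class = AutoModelForImageTextToText  # image-text models, checked first
elif type(config) in AutoModelForVision2Seq._model_mapping.keys():
    load_class = AutoModelForVision2Seq
else:
    load_class = AutoModelForCausalLM  # text-only fallback
```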
@@ -67,4 +67,5 @@ def resize_embedding_layer(model: "PreTrainedModel", tokenizer: "PreTrainedToken
         _noisy_mean_initialization(model.get_input_embeddings().weight.data, num_new_tokens)
         _noisy_mean_initialization(model.get_output_embeddings().weight.data, num_new_tokens)

+    model.config.vocab_size = new_embedding_size
     logger.info_rank0(f"Resized token embeddings from {current_embedding_size} to {new_embedding_size}.")
@@ -47,6 +47,8 @@ def apply_liger_kernel(
         from liger_kernel.transformers import apply_liger_kernel_to_gemma3_text as apply_liger_kernel
     elif model_type == "glm4":
         from liger_kernel.transformers import apply_liger_kernel_to_glm4 as apply_liger_kernel
+    elif model_type == "glm4v":
+        from liger_kernel.transformers import apply_liger_kernel_to_glm4v as apply_liger_kernel
     elif model_type == "granite":
         from liger_kernel.transformers import apply_liger_kernel_to_granite as apply_liger_kernel
     elif model_type == "llama":
@@ -39,6 +39,9 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
         return

     model_type = getattr(model.config, "model_type", None)
+    text_config = getattr(model.config, "text_config", None)
+    text_architectures = getattr(text_config, "architectures", None)
+
     if model_type == "dbrx":
         from transformers.models.dbrx.modeling_dbrx import DbrxFFN

@@ -57,6 +60,16 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:

         _set_z3_leaf_modules(model, [GraniteMoeMoE])

+    if model_type == "glm4_moe":
+        from transformers.models.glm4_moe.modeling_glm4_moe import Glm4MoeMoE
+
+        _set_z3_leaf_modules(model, [Glm4MoeMoE])
+
+    if model_type == "glm4v_moe":
+        from transformers.models.glm4v_moe.modeling_glm4v_moe import Glm4vMoeTextMoE
+
+        _set_z3_leaf_modules(model, [Glm4vMoeTextMoE])
+
     if model_type == "jamba":
         from transformers.models.jamba.modeling_jamba import JambaSparseMoeBlock

@@ -92,7 +105,7 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:

         _set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])

-    if model_type == "qwen3_moe":
+    if model_type == "qwen3_moe" or text_architectures == "Qwen3MoeForCausalLM":
         from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeSparseMoeBlock

         _set_z3_leaf_modules(model, [Qwen3MoeSparseMoeBlock])
@@ -103,6 +116,8 @@ def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_t
         return

     model_type = getattr(config, "model_type", None)
+    text_config = getattr(config, "text_config", None)  # for multimodal model
+
     if model_type in [
         "dbrx",
         "granitemoe",
@@ -117,9 +132,18 @@ def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_t
     ]:
         setattr(config, "output_router_logits", True)

+    if text_config and getattr(text_config, "model_type", None) in [
+        "glm4v_moe_text",  # glmv4_5
+        "qwen3_moe",  # internvl_3_5
+    ]:
+        setattr(text_config, "output_router_logits", True)
+
     if model_type in ["granitemoe", "jamba", "llama4", "mixtral", "olmoe", "phimoe", "qwen2_moe", "qwen3_moe"]:
         setattr(config, "router_aux_loss_coef", model_args.moe_aux_loss_coef)

+    elif text_config and getattr(text_config, "model_type", None) in ["qwen3_moe"]:
+        setattr(text_config, "router_aux_loss_coef", model_args.moe_aux_loss_coef)
+
     elif model_type == "deepseek":
         setattr(config, "aux_loss_alpha", model_args.moe_aux_loss_coef)
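For context, `_set_z3_leaf_modules` wraps DeepSpeed's leaf-module registration: marking a sparse-MoE block as a ZeRO-3 "leaf" stops the partitioner from hooking into its submodules, which otherwise stalls when experts are activated sparsely. A hedged sketch using DeepSpeed's public helper (`set_z3_leaf_modules` in `deepspeed.utils`; the MoE class matches the hunk above):

```python
from deepspeed.utils import set_z3_leaf_modules  # requires deepspeed
from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeSparseMoeBlock

def mark_moe_leaves(model):
    # Treat each sparse-MoE block as an indivisible leaf so ZeRO-3 does not
    # partition-hook the experts individually.
    set_z3_leaf_modules(model, [Qwen3MoeSparseMoeBlock])
```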
@@ -90,12 +90,13 @@ def configure_quantization(
         if model_args.quantization_bit is not None:
             logger.warning_rank0("`quantization_bit` will not affect on the PTQ-quantized models.")

-        if is_deepspeed_zero3_enabled() or is_fsdp_enabled():
-            raise ValueError("DeepSpeed ZeRO-3 or FSDP is incompatible with PTQ-quantized models.")
-
         quantization_config: dict[str, Any] = getattr(config, "quantization_config", None)
         quant_method = quantization_config.get("quant_method", "")

+        if quant_method != QuantizationMethod.MXFP4 and (is_deepspeed_zero3_enabled() or is_fsdp_enabled()):
+            # mxfp4 will dequant the model weights
+            raise ValueError("DeepSpeed ZeRO-3 or FSDP is incompatible with PTQ-quantized models.")
+
         if quant_method == QuantizationMethod.GPTQ:
             check_version("gptqmodel>=2.0.0", mandatory=True)
             quantization_config.pop("disable_exllama", None)  # remove deprecated args
src/llamafactory/model/model_utils/sdpa_npu_redirect.py (new file, 131 lines)
@@ -0,0 +1,131 @@
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import math
import os
from typing import Optional

import torch
import torch.nn.functional as F
from transformers.utils import is_torch_npu_available


logger = logging.getLogger(__name__)

_ORIG_SDPA = F.scaled_dot_product_attention


def _to_bool_4d_mask(
    attn_mask: Optional[torch.Tensor], q_len: int, kv_len: int, device: torch.device
) -> Optional[torch.Tensor]:
    """Normalize additive/other Hugging Face masks into a boolean mask of shape [B, 1, Q, K] (True = masked)."""
    if attn_mask is None:
        return None
    if attn_mask.dtype != torch.bool:
        attn_mask = attn_mask < 0  # additive -inf -> True
    if attn_mask.dim() == 4:
        return attn_mask[..., :q_len, :kv_len].contiguous()
    if attn_mask.dim() == 3:
        return attn_mask[:, None, :q_len, :kv_len].contiguous()
    if attn_mask.dim() == 2:
        return attn_mask[:, None, None, :kv_len].expand(-1, 1, q_len, -1).contiguous()
    return attn_mask.to(device)


def _merge_causal_mask(
    attn_mask: Optional[torch.Tensor], is_causal: bool, L: int, S: int, device: torch.device
) -> Optional[torch.Tensor]:
    """Merge `is_causal` into the boolean/additive attention mask (True = masked)."""
    if not is_causal or L != S:
        return attn_mask
    causal_bool = torch.ones((1, 1, L, L), dtype=torch.bool, device=device).triu(1)
    if attn_mask is None:
        return causal_bool
    if attn_mask.dtype != torch.bool:
        attn_mask = attn_mask < 0
    if attn_mask.dim() == 2:
        attn_mask = attn_mask[:, None, None, :L].expand(-1, 1, L, -1).contiguous()
    elif attn_mask.dim() == 3:
        attn_mask = attn_mask[:, None, :L, :L].contiguous()
    return attn_mask | causal_bool


def _sdpa_npu_redirect(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    dropout_p: float = 0.0,
    is_causal: bool = False,
    scale: Optional[float] = None,
):
    """A drop-in replacement for `F.scaled_dot_product_attention`.

    Automatically falls back to the native SDPA when conditions are not met.
    The NPU-fused path is only enabled when q/k/v have shape (B, N, S, D); otherwise, it falls back.
    """
    # Fall back if the feature is disabled or the conditions are not satisfied.
    if os.environ.get("NPU_FA_DISABLE", "0") == "1":
        return _ORIG_SDPA(q, k, v, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, scale=scale)

    npu_ok = is_torch_npu_available() and (q.device.type == "npu")
    dtype_ok = q.dtype in (torch.float16, torch.bfloat16)
    shape_ok = q.dim() == 4 and k.dim() == 4 and v.dim() == 4  # expect BNSD layout
    if not (npu_ok and dtype_ok and shape_ok):
        return _ORIG_SDPA(q, k, v, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, scale=scale)

    L, S = q.size(-2), k.size(-2)
    merged_mask = _merge_causal_mask(attn_mask, is_causal, L, S, q.device)
    mask_bool = _to_bool_4d_mask(merged_mask, q_len=L, kv_len=S, device=q.device)

    head_dim = q.size(-1)
    sc = (1.0 / math.sqrt(head_dim)) if (scale is None) else scale

    train_mode = torch.is_grad_enabled() and (dropout_p > 0)
    keep_prob = 1.0 - (dropout_p if train_mode else 0.0)

    try:
        import torch_npu

        out = torch_npu.npu_fusion_attention(
            q.contiguous(),
            k.contiguous(),
            v.contiguous(),
            head_num=q.size(-3),  # N
            input_layout="BNSD",  # (B, N, S, D)
            pse=None,
            atten_mask=mask_bool,  # True = masked
            scale=sc,
            pre_tockens=2147483647,
            next_tockens=2147483647,
            keep_prob=keep_prob,
            sync=False,
            inner_precise=0,
        )[0]
        return out
    except Exception as e:
        if os.environ.get("NPU_FA_VERBOSE", "0") == "1":
            logger.warning(f"[sdpa_npu_redirect] npu_fusion_attention failed: {e}; fallback to SDPA.")
        return _ORIG_SDPA(q, k, v, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, scale=scale)


def apply_sdpa_npu_redirect(verbose: bool = True):
    """Install the redirection by pointing `F.scaled_dot_product_attention` to our implementation."""
    if getattr(F.scaled_dot_product_attention, "__wrapped_by_npu__", False):
        return
    F.scaled_dot_product_attention = _sdpa_npu_redirect
    setattr(F.scaled_dot_product_attention, "__wrapped_by_npu__", True)
    if verbose:
        logger.info("[sdpa_npu_redirect] SDPA has been redirected to Ascend npu_fusion_attention when available.")
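How the redirect above is exercised in practice — a sketch using the env toggles (`NPU_FA_DISABLE`, `NPU_FA_VERBOSE`) defined in the new module; on non-NPU hardware every call simply falls back to native SDPA:

```python
import os

import torch
import torch.nn.functional as F

from llamafactory.model.model_utils.sdpa_npu_redirect import apply_sdpa_npu_redirect

os.environ["NPU_FA_VERBOSE"] = "1"  # log fused-kernel failures instead of failing silently
apply_sdpa_npu_redirect(verbose=True)

q = k = v = torch.randn(1, 8, 128, 64)  # (B, N, S, D); on CPU this takes the fallback path
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
```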
@@ -80,12 +80,15 @@ def get_unsloth_peft_model(


 def load_unsloth_peft_model(
-    config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool
+    config: "PretrainedConfig",
+    model_args: "ModelArguments",
+    finetuning_args: "FinetuningArguments",
+    is_trainable: bool,
 ) -> "PreTrainedModel":
     r"""Load peft model with unsloth. Used in both training and inference."""
     from unsloth import FastLanguageModel  # type: ignore

-    unsloth_kwargs = _get_unsloth_kwargs(config, model_args.adapter_name_or_path[0], model_args)
+    unsloth_kwargs = _get_unsloth_kwargs(config, model_args.adapter_name_or_path[0], model_args, finetuning_args)
     try:
         if not is_trainable:
             unsloth_kwargs["use_gradient_checkpointing"] = False
@@ -49,7 +49,7 @@ def load_valuehead_params(path_or_repo_id: str, model_args: "ModelArguments") ->

     try:
         vhead_file = cached_file(filename=V_HEAD_WEIGHTS_NAME, **kwargs)
-        return torch.load(vhead_file, map_location="cpu")
+        return torch.load(vhead_file, map_location="cpu", weights_only=True)
     except Exception as err:
         err_text = str(err)
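Why `weights_only=True` matters: `torch.load` with the default unpickler can execute arbitrary code embedded in a malicious checkpoint, while `weights_only=True` restricts deserialization to tensors and primitive containers. A one-line illustration (the path is hypothetical):

```python
import torch

state_dict = torch.load("value_head.bin", map_location="cpu", weights_only=True)
```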
@@ -199,15 +199,67 @@ def patch_target_modules(
    return target_modules


_register_composite_model(
    model_type="dots_ocr",
    projector_key="vision_tower.merger",
    vision_model_keys=["vision_tower"],
    language_model_keys=["model", "lm_head"],
    lora_conflict_keys=["merger"],
)


_register_composite_model(
    model_type="gemma3",
)


_register_composite_model(
    model_type="gemma3n",
    vision_model_keys=["vision_tower", "audio_tower"],
    lora_conflict_keys=["timm_model", "subsample_conv_projection"],
)


# copied from qwen2vl
_register_composite_model(
    model_type="glm4v",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)


_register_composite_model(
    model_type="glm4v_moe",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)


_register_composite_model(
    model_type="internvl",
)


_register_composite_model(
    model_type="interns1",
)


_register_composite_model(
    model_type="Keye",
    projector_key="mlp_AR",
    vision_model_keys=["visual.vision_model.patch_embedding", "visual.vision_model.encoder"],
    language_model_keys=["model", "lm_head"],
    lora_conflict_keys=["patch_embedding"],
)


_register_composite_model(
    model_type="kimi_vl",
)


_register_composite_model(
    model_type="llama4",
@@ -246,6 +298,7 @@ _register_composite_model(
    lora_conflict_keys=["audio_projection_layer"],
)


_register_composite_model(
    model_type="mistral3",
)
@@ -299,6 +352,33 @@ _register_composite_model(
)


_register_composite_model(
    model_type="qwen3_vl",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)


_register_composite_model(
    model_type="qwen3_vl_moe",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)


_register_composite_model(
    model_type="qwen3_omni_moe_thinker",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks", "audio_tower"],
    language_model_keys=["model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)


_register_composite_model(
    model_type="video_llava",
)
@@ -178,6 +178,9 @@ def patch_model(
         resize_embedding_layer(model, tokenizer)

     if is_trainable:
+        if getattr(model.config, "model_type", None) == "gemma3n":
+            setattr(model_args, "disable_gradient_checkpointing", True)
+
         prepare_model_for_training(model, model_args)
         autocast_projector_dtype(model, model_args)
         add_z3_leaf_module(model)
@@ -185,6 +188,23 @@ def patch_model(
     if not model_args.use_unsloth:
         print_attn_implementation(model.config)

+    # ======== NPU fused attention redirect: SDPA -> torch_npu.npu_fusion_attention ========
+    # Place after all structural modifications and before DeepSpeed/Trainer initialization;
+    # does not modify any Module/_parameters, safe for ZeRO-3 + offload.
+    try:
+        import os
+
+        import torch
+
+        if hasattr(torch, "npu") and torch.npu.is_available() and os.environ.get("NPU_FA_DISABLE", "0") != "1":
+            from .model_utils.sdpa_npu_redirect import apply_sdpa_npu_redirect
+
+            apply_sdpa_npu_redirect(verbose=not model_args.use_unsloth)
+            logger.info_rank0("[sdpa_npu_redirect] Enabled: SDPA will use Ascend npu_fusion_attention when available.")
+    except Exception as e:
+        logger.warning_rank0(f"[sdpa_npu_redirect] Failed to enable redirect, will keep native SDPA. Reason: {e}")
+    # =====================================================================================
+
     try:
         model.add_model_tags(["llama-factory"])
     except Exception:
@@ -208,9 +228,23 @@ def patch_valuehead_model(model: "AutoModelForCausalLMWithValueHead") -> None:
         if isinstance(self.pretrained_model, PeftModel):
             self.pretrained_model.create_or_update_model_card(output_dir)

+    def get_rope_index_func(self: "AutoModelForCausalLMWithValueHead"):
+        if isinstance(self.pretrained_model, PeftModel):
+            base_model = self.pretrained_model.base_model.model
+        else:
+            base_model = self.pretrained_model
+
+        if base_model and hasattr(base_model, "get_rope_index"):
+            return base_model.get_rope_index
+        elif base_model and hasattr(base_model, "model") and hasattr(base_model.model, "get_rope_index"):
+            return base_model.model.get_rope_index
+        else:
+            return None
+
     ignore_modules = [name for name, _ in model.named_parameters() if "pretrained_model" in name]
     setattr(model, "_keys_to_ignore_on_save", ignore_modules)
     setattr(model, "tie_weights", MethodType(tie_weights, model))
     setattr(model, "get_input_embeddings", MethodType(get_input_embeddings, model))
     setattr(model, "get_output_embeddings", MethodType(get_output_embeddings, model))
+    setattr(model, "get_rope_index", get_rope_index_func(model))
     setattr(model, "create_or_update_model_card", MethodType(create_or_update_model_card, model))
@@ -73,10 +73,10 @@ def fix_valuehead_checkpoint(
     if safe_serialization:
         path_to_checkpoint = os.path.join(output_dir, SAFE_WEIGHTS_NAME)
         with safe_open(path_to_checkpoint, framework="pt", device="cpu") as f:
-            state_dict: dict[str, torch.Tensor] = {key: f.get_tensor(key) for key in f.keys()}
+            state_dict: dict[str, torch.Tensor] = {key: f.get_tensor(key).clone() for key in f.keys()}
     else:
         path_to_checkpoint = os.path.join(output_dir, WEIGHTS_NAME)
-        state_dict: dict[str, torch.Tensor] = torch.load(path_to_checkpoint, map_location="cpu")
+        state_dict: dict[str, torch.Tensor] = torch.load(path_to_checkpoint, map_location="cpu", weights_only=True)

     os.remove(path_to_checkpoint)
     decoder_state_dict, v_head_state_dict = {}, {}
@@ -78,6 +78,7 @@ class CustomDPOTrainer(DPOTrainer):
         self.beta = finetuning_args.pref_beta
         self.loss_type = finetuning_args.pref_loss
         self.ftx_gamma = finetuning_args.pref_ftx
+        self.bco_gemma = finetuning_args.pref_bco_weight
         self.label_smoothing = finetuning_args.dpo_label_smoothing
         self.simpo_gamma = finetuning_args.simpo_gamma
         self.ld_alpha = finetuning_args.ld_alpha
@@ -108,6 +109,11 @@ class CustomDPOTrainer(DPOTrainer):
             self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
             self.add_callback(BAdamCallback)

+        if self.bco_gemma >= 1e-6:
+            from trl.trainer import RunningMoments
+
+            self.running = RunningMoments(self.accelerator)
+
     @override
     def create_optimizer(self) -> "torch.optim.Optimizer":
         if self.optimizer is None:
@@ -151,6 +157,25 @@ class CustomDPOTrainer(DPOTrainer):
         simpo_loss = -F.logsigmoid(self.beta * logits)
         return simpo_loss

+    def bco_loss(
+        self,
+        chosen_logps: "torch.Tensor",
+        rejected_logps: "torch.Tensor",
+        reference_chosen_logps: "torch.Tensor",
+        reference_rejected_logps: "torch.Tensor",
+    ) -> "torch.Tensor":
+        chosen_logratios = chosen_logps - reference_chosen_logps
+        rejected_logratios = rejected_logps - reference_rejected_logps
+        chosen_rewards = self.beta * chosen_logratios
+        rejected_rewards = self.beta * rejected_logratios
+        rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach()
+        self.running.update(rewards)  # update baseline
+        delta = self.running.mean
+        bco_loss = -F.logsigmoid((self.beta * chosen_logratios) - delta) - F.logsigmoid(
+            -(self.beta * rejected_logratios - delta)
+        )
+        return bco_loss
+
     def compute_preference_loss(
         self,
         policy_chosen_logps: "torch.Tensor",
@@ -174,6 +199,12 @@ class CustomDPOTrainer(DPOTrainer):
                 policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps
             )

+            if self.bco_gemma > 1e-6:
+                bco_losses = self.bco_loss(
+                    policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps
+                )
+                losses += bco_losses * self.bco_gemma
+
         return losses, chosen_rewards, rejected_rewards

     @override
@@ -253,6 +284,9 @@ class CustomDPOTrainer(DPOTrainer):
             sft_loss = -policy_chosen_logps_avg
             if self.ftx_gamma > 1e-6:
                 losses += self.ftx_gamma * sft_loss
+            if self.bco_gemma > 1e-6:
+                # re-weighting for MPO
+                losses /= self.ftx_gamma + self.bco_gemma + 1.0

         prefix = "eval_" if train_eval == "eval" else ""
         metrics[f"{prefix}rewards/chosen"] = chosen_rewards.mean().item()
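Written out, the BCO term that `bco_loss` adds (with δ the running mean of β-scaled rewards tracked by TRL's `RunningMoments`), and the MPO normalization applied afterwards — both transcribed directly from the code above:

```latex
\mathcal{L}_{\mathrm{BCO}}
  = -\log\sigma\big(\beta\,\Delta_w - \delta\big)
    -\log\sigma\big(-(\beta\,\Delta_l - \delta)\big),
\quad
\Delta_w = \log\tfrac{\pi_\theta(y_w\mid x)}{\pi_{\mathrm{ref}}(y_w\mid x)},\;
\Delta_l = \log\tfrac{\pi_\theta(y_l\mid x)}{\pi_{\mathrm{ref}}(y_l\mid x)}

\mathcal{L}_{\mathrm{MPO}}
  = \frac{\mathcal{L}_{\mathrm{DPO}}
          + \gamma_{\mathrm{ftx}}\,\mathcal{L}_{\mathrm{SFT}}
          + \gamma_{\mathrm{bco}}\,\mathcal{L}_{\mathrm{BCO}}}
         {1 + \gamma_{\mathrm{ftx}} + \gamma_{\mathrm{bco}}}
```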
src/llamafactory/train/fp8_utils.py (new file, 171 lines)
@@ -0,0 +1,171 @@
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Any, Optional

from ..extras import logging


if TYPE_CHECKING:
    from ..hparams import ModelArguments

logger = logging.get_logger(__name__)


def create_fp8_kwargs(model_args: "ModelArguments") -> list[Any]:
    """Create AORecipeKwargs for FP8 training with HuggingFace Accelerate.

    Args:
        model_args: Model arguments containing FP8 configuration

    Returns:
        List containing AORecipeKwargs if FP8 is enabled and supported, empty list otherwise
    """
    if not model_args.fp8:
        return []

    try:
        # Check if AORecipeKwargs is available (Accelerate 1.8.0+)
        from accelerate.utils import AORecipeKwargs

        backend = getattr(model_args, "fp8_backend", "auto")
        logger.info_rank0(f"Creating FP8 configuration with backend: {backend}")

        # Create Float8LinearConfig if torchao backend is used
        config = None
        if backend == "torchao" or backend == "auto":
            from torchao.float8 import Float8LinearConfig

            # Use rowwise scaling for better performance (as recommended by torchao)
            # Configure alignment requirements for FP8 kernels
            config = Float8LinearConfig.from_recipe_name("rowwise")

            # Enable alignment for better kernel performance
            if hasattr(config, "enable_amax_init"):
                config.enable_amax_init = True
            if hasattr(config, "enable_pre_and_post_forward"):
                config.enable_pre_and_post_forward = True

        # Create module filter function to skip problematic layers
        # TorchAO FP8 requires dimensions divisible by 16 for optimal kernels
        def module_filter_func(module, layer_name):
            # Skip embedding and output layers for numerical stability
            skip_layers = ["embed", "lm_head", "output", "classifier"]
            if any(skip_name in layer_name.lower() for skip_name in skip_layers):
                return False

            # Only convert Linear layers
            if not (hasattr(module, "weight") and len(module.weight.shape) == 2):
                return False

            # Check dimension alignment for FP8 kernels
            weight = module.weight
            in_features, out_features = weight.shape[1], weight.shape[0]

            # Skip layers with dimensions not divisible by 16 to avoid kernel errors
            if in_features % 16 != 0 or out_features % 16 != 0:
                logger.debug(
                    f"Skipping layer {layer_name} with dimensions {out_features}x{in_features} (not divisible by 16)"
                )
                return False

            return True

        # Map FSDP all-gather setting if available (this affects the underlying implementation)
        if hasattr(model_args, "fp8_enable_fsdp_float8_all_gather") and model_args.fp8_enable_fsdp_float8_all_gather:
            logger.info_rank0("FSDP float8 all-gather optimization requested")

        return [AORecipeKwargs(config=config, module_filter_func=module_filter_func)]
    except Exception as e:
        logger.info_rank0(f"Failed to create FP8 configuration: {e}")
        return []


def get_fp8_mixed_precision(model_args: "ModelArguments") -> Optional[str]:
    """Get the mixed precision setting for Accelerate when using FP8.

    Args:
        model_args: Model arguments containing FP8 configuration

    Returns:
        "fp8" if FP8 is enabled, None otherwise
    """
    return "fp8" if model_args.fp8 else None


def configure_fp8_environment(model_args: "ModelArguments") -> None:
    """Configure FP8 environment for HuggingFace Accelerate.

    FP8 training is handled entirely through HuggingFace Accelerate, regardless of whether
    DeepSpeed or FSDP is used for distributed training. This function sets up the environment
    variables and validates the FP8 configuration.

    Args:
        model_args: Model arguments containing FP8 configuration
    """
    import os

    if not model_args.fp8:
        return

    # Set mixed precision to fp8 for HuggingFace Accelerate
    os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
    logger.info_rank0("Set ACCELERATE_MIXED_PRECISION=fp8")

    # Configure FP8 backend and options
    backend = getattr(model_args, "fp8_backend", "auto")
    if backend != "auto":
        os.environ["FP8_BACKEND"] = backend
        logger.info_rank0(f"Set FP8_BACKEND={backend}")

    # Create and validate FP8 recipe kwargs (for logging/debugging)
    fp8_kwargs = create_fp8_kwargs(model_args)
    logger.info_rank0(f"FP8 AORecipeKwargs created: {len(fp8_kwargs)} items")

    # Enable FSDP float8 all-gather optimization if requested
    if hasattr(model_args, "fp8_enable_fsdp_float8_all_gather") and model_args.fp8_enable_fsdp_float8_all_gather:
        os.environ["FP8_ENABLE_FSDP_FLOAT8_ALL_GATHER"] = "true"
        logger.info_rank0("Set FP8_ENABLE_FSDP_FLOAT8_ALL_GATHER=true")

    logger.info_rank0("FP8 environment configured - all FP8 training handled by HuggingFace Accelerate")


def verify_fp8_status(accelerator, model_args: "ModelArguments") -> None:
    """Verify that FP8 training is actually working after model preparation.

    Args:
        accelerator: The HuggingFace Accelerator instance
        model_args: Model arguments containing FP8 configuration
    """
    if not model_args.fp8:
        return

    # Check Accelerate's FP8 status
    fp8_enabled = getattr(accelerator, "fp8_enabled", False)
    fp8_backend_type = getattr(accelerator, "fp8_backend", "UNKNOWN")

    backend = getattr(model_args, "fp8_backend", "auto")
    if backend == "torchao" or backend == "auto":
        logger.info_rank0(
            "FP8 training enabled with TorchAO backend. For optimal performance, "
            "ensure model layer dimensions are mostly divisible by 16. "
            "If you encounter issues, try fp8_backend='te' with Transformer Engine."
        )
    else:
        logger.info_rank0(f"FP8 training enabled with {backend} backend.")

    logger.info_rank0(f"Accelerate FP8 status - enabled: {fp8_enabled}, backend: {fp8_backend_type}")

    if not fp8_enabled:
        logger.info_rank0("WARNING: FP8 was requested but Accelerate shows fp8_enabled=False. FP8 may not be working.")
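How these helpers plug into Accelerate — a sketch, assuming Accelerate ≥ 1.8.0 (which provides `AORecipeKwargs` as a kwargs handler) and torchao installed; `model_args` stands in for LLaMA-Factory's `ModelArguments`:

```python
from accelerate import Accelerator

from llamafactory.train.fp8_utils import create_fp8_kwargs, get_fp8_mixed_precision

accelerator = Accelerator(
    mixed_precision=get_fp8_mixed_precision(model_args) or "bf16",
    kwargs_handlers=create_fp8_kwargs(model_args),  # empty list -> plain bf16 training
)
# model = accelerator.prepare(model)  # Linear layers passing the filter run in FP8
```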
@@ -390,7 +390,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         batch: dict[str, torch.Tensor] = self.prepare_model_inputs(queries, responses)
         unwrapped_model: AutoModelForCausalLMWithValueHead = self.accelerator.unwrap_model(self.model)

-        if self.finetuning_args.reward_model_type == "lora":
+        if self.finetuning_args.reward_model_type in ["lora", "oft"]:
             replace_model(unwrapped_model, target="reward")
             reward_model = self.model
         else:
@@ -399,7 +399,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context:  # support bf16
             values: torch.Tensor = reward_model(**batch, return_dict=True, use_cache=False)[-1]

-        if self.finetuning_args.reward_model_type == "lora":
+        if self.finetuning_args.reward_model_type in ["lora", "oft"]:
             replace_model(unwrapped_model, target="default")

         rewards = values.gather(dim=-1, index=(batch["attention_mask"].sum(dim=-1, keepdim=True) - 1))
@@ -21,21 +21,29 @@ from typing_extensions import override

 from ...extras.packages import is_transformers_version_greater_than
 from ..callbacks import SaveProcessorCallback
+from ..fp8_utils import configure_fp8_environment, verify_fp8_status
 from ..trainer_utils import create_custom_optimizer, create_custom_scheduler


 if TYPE_CHECKING:
     from transformers import ProcessorMixin

-    from ...hparams import FinetuningArguments
+    from ...hparams import FinetuningArguments, ModelArguments


 class CustomTrainer(Trainer):
     r"""Inherit Trainer for custom optimizer."""

     def __init__(
-        self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
+        self,
+        finetuning_args: "FinetuningArguments",
+        processor: Optional["ProcessorMixin"],
+        model_args: Optional["ModelArguments"] = None,
+        **kwargs,
     ) -> None:
+        # Configure FP8 environment if enabled
+        if model_args is not None and model_args.fp8:
+            configure_fp8_environment(model_args)
+
         if is_transformers_version_greater_than("4.46"):
             kwargs["processing_class"] = kwargs.pop("tokenizer")
@@ -56,6 +64,10 @@ class CustomTrainer(Trainer):
             self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
             self.add_callback(BAdamCallback)

+        # Verify FP8 status after trainer initialization (accelerator should be available)
+        if model_args is not None and model_args.fp8 and hasattr(self, "accelerator"):
+            verify_fp8_status(self.accelerator, model_args)
+
     @override
     def create_optimizer(self) -> "torch.optim.Optimizer":
         if self.optimizer is None:
@@ -21,11 +21,11 @@ from typing import TYPE_CHECKING, Optional

 import numpy as np
 import torch
-from transformers.utils import is_jieba_available, is_nltk_available
+from transformers.utils import is_nltk_available

 from ...extras.constants import IGNORE_INDEX
 from ...extras.misc import numpify
-from ...extras.packages import is_rouge_available
+from ...extras.packages import is_jieba_available, is_rouge_available


 if TYPE_CHECKING:
@@ -29,6 +29,7 @@ from ...extras import logging
 from ...extras.constants import IGNORE_INDEX
 from ...extras.packages import is_transformers_version_greater_than
 from ..callbacks import SaveProcessorCallback
+from ..fp8_utils import configure_fp8_environment, verify_fp8_status
 from ..trainer_utils import create_custom_optimizer, create_custom_scheduler


@@ -37,7 +38,7 @@ if TYPE_CHECKING:
     from transformers import PreTrainedTokenizer, ProcessorMixin
     from transformers.trainer import PredictionOutput

-    from ...hparams import FinetuningArguments
+    from ...hparams import FinetuningArguments, ModelArguments


 logger = logging.get_logger(__name__)
@@ -50,9 +51,13 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         self,
         finetuning_args: "FinetuningArguments",
         processor: Optional["ProcessorMixin"],
+        model_args: Optional["ModelArguments"] = None,
         gen_kwargs: Optional[dict[str, Any]] = None,
         **kwargs,
     ) -> None:
+        # Configure FP8 environment if enabled
+        if model_args is not None and model_args.fp8:
+            configure_fp8_environment(model_args)
         if is_transformers_version_greater_than("4.46"):
             kwargs["processing_class"] = kwargs.pop("tokenizer")
         else:
@@ -78,6 +83,15 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
             self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
             self.add_callback(BAdamCallback)

+        if finetuning_args.use_dft_loss:
+            from ..trainer_utils import dft_loss_func
+
+            self.compute_loss_func = dft_loss_func
+
+        # Verify FP8 status after trainer initialization (accelerator should be available)
+        if model_args is not None and model_args.fp8 and hasattr(self, "accelerator"):
+            verify_fp8_status(self.accelerator, model_args)
+
     @override
     def create_optimizer(self) -> "torch.optim.Optimizer":
         if self.optimizer is None:
@@ -631,6 +631,51 @@ def get_batch_logps(
     return logps, valid_length


+def dft_loss_func(outputs, labels, num_items_in_batch=None):
+    logits = outputs.get("logits")
+    if logits is None:
+        return outputs.get("loss", torch.tensor(0.0))
+
+    logits = logits.float()
+    vocab_size = logits.size(-1)
+    labels = torch.nn.functional.pad(labels, (0, 1), value=-100)
+    shift_labels = labels[..., 1:].contiguous()
+    logits = logits.view(-1, vocab_size)
+    shift_labels = shift_labels.view(-1)
+    shift_labels = shift_labels.to(logits.device)
+
+    loss = _dft_cross_entropy(logits, shift_labels, num_items_in_batch)
+    return loss
+
+
+def _dft_cross_entropy(
+    source: torch.Tensor,
+    target: torch.Tensor,
+    num_items_in_batch: Optional[torch.Tensor] = None,
+    ignore_index: int = -100,
+) -> torch.Tensor:
+    per_token_loss = torch.nn.functional.cross_entropy(source, target, ignore_index=ignore_index, reduction="none")
+    valid_mask = target != ignore_index
+    if not valid_mask.any():
+        return torch.tensor(0.0, device=source.device, dtype=source.dtype)
+
+    valid_losses = per_token_loss[valid_mask]
+
+    with torch.no_grad():
+        target_probs = torch.exp(-valid_losses)
+
+    weighted_losses = valid_losses * target_probs
+
+    if num_items_in_batch is not None:
+        total_loss = weighted_losses.sum()
+        if torch.is_tensor(num_items_in_batch):
+            num_items_in_batch = num_items_in_batch.to(total_loss.device)
+        loss = total_loss / num_items_in_batch
+    else:
+        loss = weighted_losses.mean()
+    return loss
+
+
 def nested_detach(
     tensors: Union["torch.Tensor", list["torch.Tensor"], tuple["torch.Tensor"], dict[str, "torch.Tensor"]],
     clone: bool = False,
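The `_dft_cross_entropy` helper implements a DFT-style reweighting: since the per-token cross-entropy is CE_t = −log p_θ(y_t | y_<t), the detached weight exp(−CE_t) is exactly the model's own probability of the target token. In math form (sg = stop-gradient, Z = `num_items_in_batch` or the valid-token count):

```latex
\mathcal{L}_{\mathrm{DFT}}
  = \frac{1}{Z}\sum_{t:\,y_t \neq -100}
    \mathrm{sg}\!\left[p_\theta(y_t \mid y_{<t})\right]
    \cdot \big(-\log p_\theta(y_t \mid y_{<t})\big)
```

Confident tokens keep most of their gradient while tokens the model assigns near-zero probability are down-weighted, which damps noisy targets during supervised fine-tuning.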
@@ -77,14 +77,19 @@ def load_config() -> dict[str, Union[str, dict[str, Any]]]:
         with open(_get_config_path(), encoding="utf-8") as f:
             return safe_load(f)
     except Exception:
-        return {"lang": None, "last_model": None, "path_dict": {}, "cache_dir": None}
+        return {"lang": None, "hub_name": None, "last_model": None, "path_dict": {}, "cache_dir": None}


-def save_config(lang: str, model_name: Optional[str] = None, model_path: Optional[str] = None) -> None:
+def save_config(
+    lang: str, hub_name: Optional[str] = None, model_name: Optional[str] = None, model_path: Optional[str] = None
+) -> None:
     r"""Save user config."""
     os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
     user_config = load_config()
     user_config["lang"] = lang or user_config["lang"]
+    if hub_name:
+        user_config["hub_name"] = hub_name
+
     if model_name:
         user_config["last_model"] = model_name

@@ -247,7 +252,7 @@ def create_ds_config() -> None:
         "stage": 2,
         "allgather_partitions": True,
         "allgather_bucket_size": 5e8,
-        "overlap_comm": True,
+        "overlap_comm": False,
         "reduce_scatter": True,
         "reduce_bucket_size": 5e8,
         "contiguous_gradients": True,
@@ -262,7 +267,7 @@ def create_ds_config() -> None:

     ds_config["zero_optimization"] = {
         "stage": 3,
-        "overlap_comm": True,
+        "overlap_comm": False,
         "contiguous_gradients": True,
         "sub_group_size": 1e9,
         "reduce_bucket_size": "auto",
@@ -15,6 +15,7 @@
 from .chatbot import create_chat_box
 from .eval import create_eval_tab
 from .export import create_export_tab
+from .footer import create_footer
 from .infer import create_infer_tab
 from .top import create_top
 from .train import create_train_tab
@@ -24,6 +25,7 @@ __all__ = [
     "create_chat_box",
     "create_eval_tab",
     "create_export_tab",
+    "create_footer",
     "create_infer_tab",
     "create_top",
     "create_train_tab",
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import inspect
 import json
 from typing import TYPE_CHECKING

@@ -50,7 +51,14 @@ def create_chat_box(
 ) -> tuple["Component", "Component", dict[str, "Component"]]:
     lang = engine.manager.get_elem_by_id("top.lang")
     with gr.Column(visible=visible) as chat_box:
-        chatbot = gr.Chatbot(type="messages", show_copy_button=True)
+        kwargs = {}
+        if "show_copy_button" in inspect.signature(gr.Chatbot.__init__).parameters:
+            kwargs["show_copy_button"] = True
+
+        if "resizable" in inspect.signature(gr.Chatbot.__init__).parameters:
+            kwargs["resizable"] = True
+
+        chatbot = gr.Chatbot(type="messages", **kwargs)
         messages = gr.State([])
         with gr.Row():
             with gr.Column(scale=4):
src/llamafactory/webui/components/footer.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...extras.misc import get_current_memory
from ...extras.packages import is_gradio_available


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component


def get_device_memory() -> "gr.Slider":
    free, total = get_current_memory()
    if total != -1:
        used = round((total - free) / (1024**3), 2)
        total = round(total / (1024**3), 2)
        return gr.Slider(minimum=0, maximum=total, value=used, step=0.01, visible=True)
    else:
        return gr.Slider(visible=False)


def create_footer() -> dict[str, "Component"]:
    with gr.Row():
        device_memory = gr.Slider(visible=False, interactive=False)
        timer = gr.Timer(value=5)

    timer.tick(get_device_memory, outputs=[device_memory], queue=False)
    return dict(device_memory=device_memory)
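The footer uses Gradio's timer-driven polling to refresh the memory meter. A standalone version of the same pattern — assuming a Gradio version that provides `gr.Timer` (late 4.x or 5.x); the reading below is a placeholder:

```python
import gradio as gr

def poll():
    return gr.Slider(value=42.0, visible=True)  # illustrative reading

with gr.Blocks() as demo:
    meter = gr.Slider(visible=False, interactive=False)
    timer = gr.Timer(value=5)  # fire every 5 seconds
    timer.tick(poll, outputs=[meter], queue=False)

# demo.launch()
```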
@@ -16,9 +16,10 @@ from typing import TYPE_CHECKING

 from ...data import TEMPLATES
 from ...extras.constants import METHODS, SUPPORTED_MODELS
+from ...extras.misc import use_modelscope, use_openmind
 from ...extras.packages import is_gradio_available
 from ..common import save_config
-from ..control import can_quantize, can_quantize_to, check_template, get_model_info, list_checkpoints
+from ..control import can_quantize, can_quantize_to, check_template, get_model_info, list_checkpoints, switch_hub


 if is_gradio_available():
@@ -33,8 +34,10 @@ def create_top() -> dict[str, "Component"]:
     with gr.Row():
         lang = gr.Dropdown(choices=["en", "ru", "zh", "ko", "ja"], value=None, scale=1)
         available_models = list(SUPPORTED_MODELS.keys()) + ["Custom"]
-        model_name = gr.Dropdown(choices=available_models, value=None, scale=3)
-        model_path = gr.Textbox(scale=3)
+        model_name = gr.Dropdown(choices=available_models, value=None, scale=2)
+        model_path = gr.Textbox(scale=2)
+        default_hub = "modelscope" if use_modelscope() else "openmind" if use_openmind() else "huggingface"
+        hub_name = gr.Dropdown(choices=["huggingface", "modelscope", "openmind"], value=default_hub, scale=2)

     with gr.Row():
         finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1)
@@ -50,18 +53,25 @@ def create_top() -> dict[str, "Component"]:
     model_name.change(get_model_info, [model_name], [model_path, template], queue=False).then(
         list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False
     ).then(check_template, [lang, template])
-    model_name.input(save_config, inputs=[lang, model_name], queue=False)
-    model_path.input(save_config, inputs=[lang, model_name, model_path], queue=False)
+    model_name.input(save_config, inputs=[lang, hub_name, model_name], queue=False)
+    model_path.input(save_config, inputs=[lang, hub_name, model_name, model_path], queue=False)
     finetuning_type.change(can_quantize, [finetuning_type], [quantization_bit], queue=False).then(
         list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False
     )
     checkpoint_path.focus(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False)
     quantization_method.change(can_quantize_to, [quantization_method], [quantization_bit], queue=False)
+    hub_name.change(switch_hub, inputs=[hub_name], queue=False).then(
+        get_model_info, [model_name], [model_path, template], queue=False
+    ).then(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False).then(
+        check_template, [lang, template]
+    )
+    hub_name.input(save_config, inputs=[lang, hub_name], queue=False)

     return dict(
         lang=lang,
         model_name=model_name,
         model_path=model_path,
+        hub_name=hub_name,
         finetuning_type=finetuning_type,
         checkpoint_path=checkpoint_path,
         quantization_bit=quantization_bit,
@@ -38,6 +38,15 @@ if is_gradio_available():
     import gradio as gr


+def switch_hub(hub_name: str) -> None:
+    r"""Switch model hub.
+
+    Inputs: top.hub_name
+    """
+    os.environ["USE_MODELSCOPE_HUB"] = "1" if hub_name == "modelscope" else "0"
+    os.environ["USE_OPENMIND_HUB"] = "1" if hub_name == "openmind" else "0"
+
+
 def can_quantize(finetuning_type: str) -> "gr.Dropdown":
     r"""Judge if the quantization is available in this finetuning type.

@@ -112,7 +121,7 @@ def get_trainer_info(lang: str, output_path: os.PathLike, do_train: bool) -> tup
     running_log_path = os.path.join(output_path, RUNNING_LOG)
     if os.path.isfile(running_log_path):
         with open(running_log_path, encoding="utf-8") as f:
-            running_log = f.read()[-20000:]  # avoid lengthy log
+            running_log = "```\n" + f.read()[-20000:] + "\n```\n"  # avoid lengthy log

     trainer_log_path = os.path.join(output_path, TRAINER_LOG)
     if os.path.isfile(trainer_log_path):
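`switch_hub` only flips the two environment variables that LLaMA-Factory consults when resolving model paths, so the same switch works outside the UI as well — set them before loading anything:

```python
import os

os.environ["USE_MODELSCOPE_HUB"] = "1"  # download via ModelScope instead of Hugging Face
os.environ["USE_OPENMIND_HUB"] = "0"
```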
@@ -49,11 +49,13 @@ class Engine:
     def resume(self):
         r"""Get the initial value of gradio components and restores training status if necessary."""
         user_config = load_config() if not self.demo_mode else {}  # do not use config in demo mode
-        lang = user_config.get("lang", None) or "en"
+        lang = user_config.get("lang") or "en"
         init_dict = {"top.lang": {"value": lang}, "infer.chat_box": {"visible": self.chatter.loaded}}

         if not self.pure_chat:
             current_time = get_time()
+            hub_name = user_config.get("hub_name") or "huggingface"
+            init_dict["top.hub_name"] = {"value": hub_name}
             init_dict["train.current_time"] = {"value": current_time}
             init_dict["train.output_dir"] = {"value": f"train_{current_time}"}
             init_dict["train.config_path"] = {"value": f"{current_time}.yaml"}
@@ -22,6 +22,7 @@ from .components import (
     create_chat_box,
     create_eval_tab,
     create_export_tab,
+    create_footer,
     create_infer_tab,
     create_top,
     create_train_tab,
@@ -38,15 +39,13 @@ def create_ui(demo_mode: bool = False) -> "gr.Blocks":
     engine = Engine(demo_mode=demo_mode, pure_chat=False)
     hostname = os.getenv("HOSTNAME", os.getenv("COMPUTERNAME", platform.node())).split(".")[0]

-    with gr.Blocks(title=f"LLaMA Board ({hostname})", css=CSS) as demo:
+    with gr.Blocks(title=f"LLaMA Factory ({hostname})", css=CSS) as demo:
+        title = gr.HTML()
+        subtitle = gr.HTML()
         if demo_mode:
-            gr.HTML("<h1><center>LLaMA Board: A One-stop Web UI for Getting Started with LLaMA Factory</center></h1>")
-            gr.HTML(
-                '<h3><center>Visit <a href="https://github.com/hiyouga/LLaMA-Factory" target="_blank">'
-                "LLaMA Factory</a> for details.</center></h3>"
-            )
             gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")

+        engine.manager.add_elems("head", {"title": title, "subtitle": subtitle})
         engine.manager.add_elems("top", create_top())
         lang: gr.Dropdown = engine.manager.get_elem_by_id("top.lang")

@@ -63,6 +62,7 @@ def create_ui(demo_mode: bool = False) -> "gr.Blocks":
         with gr.Tab("Export"):
             engine.manager.add_elems("export", create_export_tab(engine))

+        engine.manager.add_elems("footer", create_footer())
         demo.load(engine.resume, outputs=engine.manager.get_elem_list(), concurrency_limit=None)
         lang.change(engine.change_lang, [lang], engine.manager.get_elem_list(), queue=False)
         lang.input(save_config, inputs=[lang], queue=False)
@@ -13,6 +13,60 @@
 # limitations under the License.

 LOCALES = {
+    "title": {
+        "en": {
+            "value": "<h1><center>🦙🏭LLaMA Factory: Unified Efficient Fine-Tuning of 100+ LLMs</center></h1>",
+        },
+        "ru": {
+            "value": "<h1><center>🦙🏭LLaMA Factory: Унифицированная эффективная тонкая настройка 100+ LLMs</center></h1>",
+        },
+        "zh": {
+            "value": "<h1><center>🦙🏭LLaMA Factory: 一站式大模型高效微调平台</center></h1>",
+        },
+        "ko": {
+            "value": "<h1><center>🦙🏭LLaMA Factory: 100+ LLMs를 위한 통합 효율적인 튜닝</center></h1>",
+        },
+        "ja": {
+            "value": "<h1><center>🦙🏭LLaMA Factory: 100+ LLMs の統合効率的なチューニング</center></h1>",
+        },
+    },
+    "subtitle": {
+        "en": {
+            "value": (
+                "<h3><center>Visit <a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>"
+                "GitHub Page</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>"
+                "Documentation</a></center></h3>"
+            ),
+        },
+        "ru": {
+            "value": (
+                "<h3><center>Посетить <a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>"
+                "страницу GitHub</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>"
+                "Документацию</a></center></h3>"
+            ),
+        },
+        "zh": {
+            "value": (
+                "<h3><center>访问 <a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>"
+                "GitHub 主页</a> <a href='https://llamafactory.readthedocs.io/zh-cn/latest/' target='_blank'>"
+                "官方文档</a></center></h3>"
+            ),
+        },
+        "ko": {
+            "value": (
+                "<h3><center><a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>"
+                "GitHub 페이지</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>"
+                "공식 문서</a>를 방문하세요.</center></h3>"
+            ),
+        },
+        "ja": {
+            "value": (
+                "<h3><center><a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>"
+                "GitHub ページ</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>"
+                "ドキュメント</a>にアクセスする</center></h3>"
+            ),
+        },
+    },
     "lang": {
         "en": {
             "label": "Language",
@@ -74,6 +128,28 @@ LOCALES = {
             "info": "事前学習済みモデルへのパス、または Hugging Face のモデル識別子。",
         },
     },
+    "hub_name": {
+        "en": {
+            "label": "Hub name",
+            "info": "Choose the model download source.",
+        },
+        "ru": {
+            "label": "Имя хаба",
+            "info": "Выберите источник загрузки модели.",
+        },
+        "zh": {
+            "label": "模型下载源",
+            "info": "选择模型下载源。(网络受限环境推荐使用 ModelScope)",
+        },
+        "ko": {
+            "label": "모델 다운로드 소스",
+            "info": "모델 다운로드 소스를 선택하세요.",
+        },
+        "ja": {
+            "label": "モデルダウンロードソース",
+            "info": "モデルをダウンロードするためのソースを選択してください。",
+        },
+    },
     "finetuning_type": {
         "en": {
             "label": "Finetuning method",
@@ -2849,6 +2925,28 @@ LOCALES = {
             "value": "エクスポート",
         },
     },
+    "device_memory": {
+        "en": {
+            "label": "Device memory",
+            "info": "Current memory usage of the device (GB).",
+        },
+        "ru": {
+            "label": "Память устройства",
+            "info": "Текущая память на устройстве (GB).",
+        },
+        "zh": {
+            "label": "设备显存",
+            "info": "当前设备的显存(GB)。",
+        },
+        "ko": {
+            "label": "디바이스 메모리",
+            "info": "지금 사용 중인 기기 메모리 (GB).",
+        },
+        "ja": {
+            "label": "デバイスメモリ",
+            "info": "現在のデバイスのメモリ(GB)。",
+        },
+    },
 }
@@ -16,14 +16,13 @@ import json
import os
from collections.abc import Generator
from copy import deepcopy
from subprocess import Popen, TimeoutExpired
from subprocess import PIPE, Popen, TimeoutExpired
from typing import TYPE_CHECKING, Any, Optional

from transformers.trainer import TRAINING_ARGS_NAME
from transformers.utils import is_torch_npu_available

from ..extras.constants import LLAMABOARD_CONFIG, MULTIMODAL_SUPPORTED_MODELS, PEFT_METHODS, TRAINING_STAGES
from ..extras.misc import is_accelerator_available, torch_gc, use_ray
from ..extras.misc import is_accelerator_available, torch_gc
from ..extras.packages import is_gradio_available
from .common import (
    DEFAULT_CACHE_DIR,
@@ -114,7 +113,7 @@ class Runner:

        return ""

    def _finalize(self, lang: str, finish_info: str) -> str:
    def _finalize(self, lang: str, finish_info: str) -> None:
        r"""Clean the cached memory and reset the runner."""
        finish_info = ALERTS["info_aborted"][lang] if self.aborted else finish_info
        gr.Info(finish_info)
@@ -123,7 +122,6 @@ class Runner:
        self.running = False
        self.running_data = None
        torch_gc()
        return finish_info

    def _parse_train_args(self, data: dict["Component", Any]) -> dict[str, Any]:
        r"""Build and validate the training arguments."""
@@ -314,11 +312,13 @@ class Runner:
            max_samples=int(get("eval.max_samples")),
            per_device_eval_batch_size=get("eval.batch_size"),
            predict_with_generate=True,
            report_to="none",
            max_new_tokens=get("eval.max_new_tokens"),
            top_p=get("eval.top_p"),
            temperature=get("eval.temperature"),
            output_dir=get_save_dir(model_name, finetuning_type, get("eval.output_dir")),
            trust_remote_code=True,
            ddp_timeout=180000000,
        )

        if get("eval.predict"):
@@ -375,7 +375,7 @@ class Runner:
            env["FORCE_TORCHRUN"] = "1"

        # NOTE: DO NOT USE shell=True to avoid security risk
        self.trainer = Popen(["llamafactory-cli", "train", save_cmd(args)], env=env)
        self.trainer = Popen(["llamafactory-cli", "train", save_cmd(args)], env=env, stderr=PIPE, text=True)
        yield from self.monitor()

    def _build_config_dict(self, data: dict["Component", Any]) -> dict[str, Any]:
@@ -417,7 +417,8 @@ class Runner:
        swanlab_link = self.manager.get_elem_by_id("train.swanlab_link") if self.do_train else None

        running_log = ""
        while self.trainer is not None:
        return_code = -1
        while return_code == -1:
            if self.aborted:
                yield {
                    output_box: ALERTS["info_aborting"][lang],
@@ -436,27 +437,26 @@ class Runner:
                    return_dict[swanlab_link] = running_info["swanlab_link"]

                yield return_dict

            try:
                self.trainer.wait(2)
                self.trainer = None
                stderr = self.trainer.communicate(timeout=2)[1]
                return_code = self.trainer.returncode
            except TimeoutExpired:
                continue

        if self.do_train:
            if os.path.exists(os.path.join(output_path, TRAINING_ARGS_NAME)) or use_ray():
                finish_info = ALERTS["info_finished"][lang]
        if return_code == 0 or self.aborted:
            finish_info = ALERTS["info_finished"][lang]
            if self.do_train:
                finish_log = ALERTS["info_finished"][lang] + "\n\n" + running_log
            else:
                finish_info = ALERTS["err_failed"][lang]
                finish_log = load_eval_results(os.path.join(output_path, "all_results.json")) + "\n\n" + running_log
        else:
            if os.path.exists(os.path.join(output_path, "all_results.json")) or use_ray():
                finish_info = load_eval_results(os.path.join(output_path, "all_results.json"))
            else:
                finish_info = ALERTS["err_failed"][lang]
            print(stderr)
            finish_info = ALERTS["err_failed"][lang]
            finish_log = ALERTS["err_failed"][lang] + f" Exit code: {return_code}\n\n```\n{stderr}\n```\n"

        return_dict = {
            output_box: self._finalize(lang, finish_info) + "\n\n" + running_log,
            progress_bar: gr.Slider(visible=False),
        }
        self._finalize(lang, finish_info)
        return_dict = {output_box: finish_log, progress_bar: gr.Slider(visible=False)}
        yield return_dict

    def save_args(self, data):
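The diff above swaps `wait(2)` for `communicate(timeout=2)` so the monitor loop can capture the trainer's stderr and report the exit code on failure. A standalone sketch of that polling pattern, with a trivially failing child process standing in for `llamafactory-cli`:

```python
import sys
from subprocess import PIPE, Popen, TimeoutExpired

# A child that writes to stderr and exits nonzero, standing in for the trainer.
proc = Popen([sys.executable, "-c", "import sys; sys.exit('boom')"], stderr=PIPE, text=True)

return_code = -1
while return_code == -1:
    try:
        # Per the subprocess docs, communicate() may be retried after a
        # TimeoutExpired without losing buffered output.
        stderr = proc.communicate(timeout=2)[1]
        return_code = proc.returncode
    except TimeoutExpired:
        continue  # the real runner yields progress updates to the UI here

if return_code != 0:
    print(f"Exit code: {return_code}\n\n{stderr}")
```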
@@ -110,8 +110,8 @@ def test_glm4_function_formatter():
def test_glm4_tool_formatter():
    formatter = ToolFormatter(tool_format="glm4")
    assert formatter.apply(content=json.dumps(TOOLS)) == [
        "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,"
        "你的任务是针对用户的问题和要求提供适当的答复和支持。# 可用工具\n\n"
        "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的,"
        "你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具\n\n"
        f"## test_tool\n\n{json.dumps(TOOLS[0], indent=4, ensure_ascii=False)}\n"
        "在调用上述函数时,请使用 Json 格式表示调用的参数。"
    ]
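The updated expectation inserts a blank line before the tool section. A minimal sketch of how such a GLM-4-style tool prompt can be assembled; the `build_glm4_tool_prompt` helper below is hypothetical, written only to illustrate the expected string, not the repo's formatter:

```python
import json

# Hypothetical helper mirroring the expected string above: a fixed system
# preamble, a "# 可用工具" section, and one "## <name>" block per tool schema.
def build_glm4_tool_prompt(tools: list[dict]) -> str:
    prompt = (
        "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的,"
        "你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具\n\n"
    )
    for tool in tools:
        # ensure_ascii=False keeps non-ASCII text readable in the rendered schema
        prompt += f"## {tool['name']}\n\n{json.dumps(tool, indent=4, ensure_ascii=False)}\n"
    return prompt + "在调用上述函数时,请使用 Json 格式表示调用的参数。"

print(build_glm4_tool_prompt([{"name": "test_tool", "description": "a demo tool"}]))
```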
@@ -56,10 +56,17 @@ TEXT_MESSAGES = [
    {"role": "assistant", "content": "I am fine!"},
]

VIDEO_MESSAGES = [
    {"role": "user", "content": "<video>What is in this video?"},
    {"role": "assistant", "content": "A cat."},
]

AUDIOS = [np.zeros(1600)]

IMAGES = [Image.new("RGB", (32, 32), (255, 255, 255))]

VIDEOS = [[Image.new("RGB", (32, 32), (255, 255, 255))] * 4]

NO_IMAGES = []

NO_VIDEOS = []
@@ -145,6 +152,8 @@ def _check_plugin(
            plugin.get_mm_inputs(IMAGES, NO_VIDEOS, AUDIOS, IMGLENS, NO_VIDLENS, AUDLENS, BATCH_IDS, processor),
            expected_mm_inputs,
        )
    elif plugin.__class__.__name__ == "Qwen3VLPlugin":  # only check the placeholder replacement
        assert plugin.process_messages(VIDEO_MESSAGES, NO_IMAGES, VIDEOS, NO_AUDIOS, processor) == expected_mm_messages
    elif plugin.__class__.__name__ != "BasePlugin":  # test mm_messages
        assert plugin.process_messages(MM_MESSAGES, IMAGES, NO_VIDEOS, NO_AUDIOS, processor) == expected_mm_messages
        assert plugin.process_token_ids(INPUT_IDS, LABELS, IMAGES, NO_VIDEOS, NO_AUDIOS, tokenizer, processor) == (
@@ -238,7 +247,6 @@ def test_llama4_plugin():
    _check_plugin(**check_inputs)


@pytest.mark.skipif(not is_transformers_version_greater_than("4.47.0"), reason="Requires transformers>=4.47.0")
def test_llava_plugin():
    image_seqlen = 576
    tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-1.5-7b-hf")
@@ -324,7 +332,14 @@ def test_qwen2_omni_plugin():
    image_seqlen, audio_seqlen = 4, 2
    tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2.5-Omni-7B")
    qwen2_omni_plugin = get_mm_plugin(
        name="qwen2_omni", audio_token="<|AUDIO|>", image_token="<|IMAGE|>", video_token="<|VIDEO|>"
        name="qwen2_omni",
        image_token="<|IMAGE|>",
        video_token="<|VIDEO|>",
        audio_token="<|AUDIO|>",
        vision_bos_token="<|vision_bos|>",
        vision_eos_token="<|vision_eos|>",
        audio_bos_token="<|audio_bos|>",
        audio_eos_token="<|audio_eos|>",
    )
    check_inputs = {"plugin": qwen2_omni_plugin, **tokenizer_module}
    check_inputs["expected_mm_messages"] = [
@@ -358,6 +373,27 @@ def test_qwen2_vl_plugin():
    _check_plugin(**check_inputs)


@pytest.mark.skipif(not is_transformers_version_greater_than("4.57.0"), reason="Requires transformers>=4.57.0")
def test_qwen3_vl_plugin():
    frame_seqlen = 1
    tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen3-VL-235B-A22B-Instruct")
    qwen3_vl_plugin = get_mm_plugin(name="qwen3_vl", video_token="<|video_pad|>")
    check_inputs = {"plugin": qwen3_vl_plugin, **tokenizer_module}
    check_inputs["expected_mm_messages"] = [
        {
            key: value.replace(
                "<video>",  # slightly different from the original processor due to the default `fps=2` in our repo
                "<0.2 seconds><|vision_start|>{}<|vision_end|><1.2 seconds><|vision_start|>{}<|vision_end|>".format(
                    "<|video_pad|>" * frame_seqlen, "<|video_pad|>" * frame_seqlen
                ),
            )
            for key, value in message.items()
        }
        for message in VIDEO_MESSAGES
    ]
    _check_plugin(**check_inputs)


@pytest.mark.skipif(not is_transformers_version_greater_than("4.47.0"), reason="Requires transformers>=4.47.0")
def test_video_llava_plugin():
    image_seqlen = 256
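The qwen3_vl expectation above encodes each sampled frame as a timestamped `<|vision_start|>…<|vision_end|>` span. A small sketch of how such a placeholder expansion can be built; the helper below and its timestamp values are illustrative only, not the repo's exact implementation:

```python
# Illustrative sketch: expand a <video> placeholder into per-frame, timestamped
# vision spans. The timestamps (0.2 s, 1.2 s) are simply the values the test
# above expects for its toy four-frame input under the default fps=2 sampling.
def expand_video_placeholder(timestamps: list[float], frame_seqlen: int) -> str:
    pieces = []
    for ts in timestamps:
        pieces.append(f"<{ts} seconds><|vision_start|>{'<|video_pad|>' * frame_seqlen}<|vision_end|>")
    return "".join(pieces)

print(expand_video_placeholder([0.2, 1.2], frame_seqlen=1))
# -> <0.2 seconds><|vision_start|><|video_pad|><|vision_end|><1.2 seconds>...
```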
@@ -226,6 +226,19 @@ def test_gemma_template(use_fast: bool):
    _check_template("google/gemma-3-4b-it", "gemma", prompt_str, answer_str, use_fast)


@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
@pytest.mark.parametrize("use_fast", [True, False])
def test_gemma2_template(use_fast: bool):
    prompt_str = (
        f"<bos><start_of_turn>user\n{MESSAGES[0]['content']}<end_of_turn>\n"
        f"<start_of_turn>model\n{MESSAGES[1]['content']}<end_of_turn>\n"
        f"<start_of_turn>user\n{MESSAGES[2]['content']}<end_of_turn>\n"
        "<start_of_turn>model\n"
    )
    answer_str = f"{MESSAGES[3]['content']}<end_of_turn>\n"
    _check_template("google/gemma-2-2b-it", "gemma2", prompt_str, answer_str, use_fast)


@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
@pytest.mark.parametrize("use_fast", [True, False])
def test_llama3_template(use_fast: bool):
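For reference, the gemma2 expectation can be reproduced directly from a messages list. A minimal sketch, assuming a stand-in MESSAGES fixture rather than the repo's shared one:

```python
# Stand-in messages; the real test uses a shared MESSAGES fixture.
MESSAGES = [
    {"role": "user", "content": "How are you"},
    {"role": "assistant", "content": "I am fine!"},
    {"role": "user", "content": "你好"},
    {"role": "assistant", "content": "很高兴认识你!"},
]

# Render the gemma2-style prompt: every turn is wrapped in
# <start_of_turn>{role}\n...<end_of_turn>\n, and the prompt ends with an
# opened model turn so the model continues from there.
prompt = "<bos>"
for message in MESSAGES[:-1]:
    role = "user" if message["role"] == "user" else "model"
    prompt += f"<start_of_turn>{role}\n{message['content']}<end_of_turn>\n"
prompt += "<start_of_turn>model\n"

answer = f"{MESSAGES[-1]['content']}<end_of_turn>\n"
print(prompt + answer)
```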
@@ -1,2 +1,2 @@
# change if test fails or cache is outdated
0.9.3.108
0.9.4.102