Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-09-13 00:22:48 +08:00)

Merge branch 'main' into cpei/refactor

Former-commit-id: 2c6262c3cd57906806e53aecd4e5a4423399c1e0
This commit is contained in commit c639e52c6b.
README.md (61 lines changed)

```diff
@@ -30,6 +30,9 @@ Choose your path:
 - **Local machine**: Please refer to [usage](#getting-started)
 - **Documentation (WIP)**: https://llamafactory.readthedocs.io/zh-cn/latest/
 
+> [!NOTE]
+> Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them.
+
 ## Table of Contents
 
 - [Features](#features)
```
```diff
@@ -162,34 +165,36 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 
 ## Supported Models
 
 | Model | Model size | Template |
-| ----------------------------------------------------------------- | -------------------------------- | --------- |
+| ----------------------------------------------------------------- | -------------------------------- | ---------------- |
 | [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
 | [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
 | [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
 | [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
 | [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
 | [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
 | [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
 | [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
 | [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
 | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3/Llama 3.1](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
+| [Llama 3-3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
 | [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
+| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
+| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
 | [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
 | [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
 | [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
 | [PaliGemma](https://huggingface.co/google) | 3B | paligemma |
 | [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
 | [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
 | [Qwen (1-2.5) (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
 | [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B/72B | qwen2_vl |
 | [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
 | [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
 | [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
 | [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
 | [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
 
 > [!NOTE]
 > For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
```
README_zh.md (61 lines changed)

```diff
@@ -31,6 +31,9 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 - **入门教程**:https://zhuanlan.zhihu.com/p/695287607
 - **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/
 
+> [!NOTE]
+> 除上述链接以外的其他网站均为未经许可的第三方网站,请小心甄别。
+
 ## 目录
 
 - [项目特色](#项目特色)
```
```diff
@@ -163,34 +166,36 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 
 ## 模型
 
 | 模型名 | 模型大小 | Template |
-| ----------------------------------------------------------------- | -------------------------------- | --------- |
+| ----------------------------------------------------------------- | -------------------------------- | ---------------- |
 | [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
 | [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
 | [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
 | [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
 | [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
 | [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
 | [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
 | [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
 | [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
 | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3/Llama 3.1](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
+| [Llama 3-3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
 | [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
+| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
+| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
 | [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
 | [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
 | [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
 | [PaliGemma](https://huggingface.co/google) | 3B | paligemma |
 | [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
 | [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
 | [Qwen (1-2.5) (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
 | [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B/72B | qwen2_vl |
 | [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
 | [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
 | [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
 | [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
 | [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
 
 > [!NOTE]
 > 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
```
Binary file not shown (Before: 164 KiB, After: 199 KiB)
Binary file not shown (Before: 167 KiB, After: 168 KiB)
requirements.txt (1 line changed)

```diff
@@ -19,3 +19,4 @@ fire
 packaging
 pyyaml
 numpy<2.0.0
+av
```
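The new `av` dependency is PyAV, the FFmpeg binding that the video-capable plugins below can use to decode clips into frame arrays. A minimal sketch of pulling frames from a video with PyAV; the helper name and the `sample.mp4` path are illustrative, not part of this commit:

```python
import av  # PyAV, the dependency added in requirements.txt


def read_video_frames(path: str, max_frames: int = 16):
    """Decode up to `max_frames` RGB frames from a video file."""
    container = av.open(path)
    frames = []
    for frame in container.decode(video=0):  # iterate the first video stream
        frames.append(frame.to_ndarray(format="rgb24"))
        if len(frames) >= max_frames:
            break
    container.close()
    return frames


# Illustrative usage (assumes a local sample.mp4 exists):
# frames = read_video_frames("sample.mp4")
```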
setup.py (2 lines changed)

```diff
@@ -54,7 +54,7 @@ extra_require = {
     "gptq": ["optimum>=1.17.0", "auto-gptq>=0.5.0"],
     "awq": ["autoawq"],
     "aqlm": ["aqlm[gpu]>=1.1.0"],
-    "vllm": ["vllm>=0.4.3,<=0.6.0"],
+    "vllm": ["vllm>=0.4.3,<=0.6.2"],
     "galore": ["galore-torch"],
     "badam": ["badam>=1.2.1"],
     "adam-mini": ["adam-mini"],
```
src/llamafactory/data/mm_plugin.py

```diff
@@ -4,6 +4,7 @@ from io import BytesIO
 from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, TypedDict, Union
 
 import numpy as np
+from transformers.image_utils import get_image_size, to_numpy_array
 from typing_extensions import override
 
 from ..extras.constants import IGNORE_INDEX, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
```
```diff
@@ -157,6 +158,7 @@ class BasePlugin:
         It holds num_patches == torch.prod(image_grid_thw)
         """
         image_processor: "BaseImageProcessor" = getattr(processor, "image_processor")
+        video_processor: "BaseImageProcessor" = getattr(processor, "video_processor", image_processor)
         input_dict = {"images": None}  # default key
         if len(images) != 0:
             images = self._regularize_images(
```
```diff
@@ -174,10 +176,16 @@ class BasePlugin:
             )
             input_dict["videos"] = videos
 
-        if input_dict.get("images", None) is not None or input_dict.get("videos", None) is not None:
-            return image_processor(**input_dict, return_tensors="pt")
-        else:
-            return {}
+        mm_inputs = {}
+        if image_processor != video_processor:
+            if input_dict.get("images") is not None:
+                mm_inputs.update(image_processor(input_dict["images"], return_tensors="pt"))
+            if input_dict.get("videos") is not None:
+                mm_inputs.update(video_processor(input_dict["videos"], return_tensors="pt"))
+        elif input_dict.get("images") is not None or input_dict.get("videos") is not None:  # same processor (qwen2-vl)
+            mm_inputs.update(image_processor(**input_dict, return_tensors="pt"))
+
+        return mm_inputs
 
     def process_messages(
         self,
```
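The reworked `_get_mm_inputs` now merges the outputs of separate image and video processors when the processor ships a distinct `video_processor` (as LLaVA-NeXT-Video does), and keeps the single-call path when one processor handles both modalities (the Qwen2-VL case). A standalone sketch of the same dispatch with plain callables standing in for Hugging Face processors; all names here are illustrative:

```python
from typing import Any, Callable, Dict, List, Optional


def collect_mm_inputs(
    images: Optional[List[Any]],
    videos: Optional[List[Any]],
    image_processor: Callable[..., Dict[str, Any]],
    video_processor: Callable[..., Dict[str, Any]],
) -> Dict[str, Any]:
    """Mirror of the dispatch above: route each modality to its own
    processor when they differ, else call the shared processor once."""
    mm_inputs: Dict[str, Any] = {}
    if image_processor is not video_processor:  # distinct processors
        if images:
            mm_inputs.update(image_processor(images))
        if videos:
            mm_inputs.update(video_processor(videos))
    elif images or videos:  # one processor accepts both keyword args
        mm_inputs.update(image_processor(images=images, videos=videos))
    return mm_inputs


# Toy processors standing in for BaseImageProcessor instances:
img_proc = lambda images: {"pixel_values": [f"img:{x}" for x in images]}
vid_proc = lambda videos: {"pixel_values_videos": [f"vid:{x}" for x in videos]}
print(collect_mm_inputs(["a"], ["b"], img_proc, vid_proc))
# {'pixel_values': ['img:a'], 'pixel_values_videos': ['vid:b']}
```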
```diff
@@ -263,6 +271,122 @@ class LlavaPlugin(BasePlugin):
         return self._get_mm_inputs(images, videos, processor)
 
 
+class LlavaNextPlugin(BasePlugin):
+    @override
+    def process_messages(
+        self,
+        messages: Sequence[Dict[str, str]],
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        processor: Optional["ProcessorMixin"],
+    ) -> List[Dict[str, str]]:
+        self._validate_input(images, videos)
+        num_image_tokens = 0
+        messages = deepcopy(messages)
+        mm_inputs = self._get_mm_inputs(images, videos, processor)
+        if "image_sizes" in mm_inputs:
+            image_sizes = iter(mm_inputs["image_sizes"])
+        if "pixel_values" in mm_inputs:
+            height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0][0]))
+        for message in messages:
+            content = message["content"]
+            while self.image_token in content:
+                image_size = next(image_sizes)
+                orig_height, orig_width = image_size
+                image_seqlen = processor._get_number_of_features(orig_height, orig_width, height, width)
+                if processor.vision_feature_select_strategy == "default":
+                    image_seqlen -= 1
+                num_image_tokens += 1
+                content = content.replace(self.image_token, "{{image}}" * image_seqlen, 1)
+
+            message["content"] = content.replace("{{image}}", self.image_token)
+
+        if len(images) != num_image_tokens:
+            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))
+        return messages
+
+    @override
+    def get_mm_inputs(
+        self,
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        imglens: Sequence[int],
+        vidlens: Sequence[int],
+        seqlens: Sequence[int],
+        processor: Optional["ProcessorMixin"],
+    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
+        self._validate_input(images, videos)
+        res = self._get_mm_inputs(images, videos, processor)
+        return res
+
+
+class LlavaNextVideoPlugin(BasePlugin):
+    @override
+    def process_messages(
+        self,
+        messages: Sequence[Dict[str, str]],
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        processor: Optional["ProcessorMixin"],
+    ) -> List[Dict[str, str]]:
+        self._validate_input(images, videos)
+        num_image_tokens = 0
+        num_video_tokens = 0
+        messages = deepcopy(messages)
+        mm_inputs = self._get_mm_inputs(images, videos, processor)
+        if "pixel_values" in mm_inputs:
+            image_sizes = iter(mm_inputs["image_sizes"])
+            height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0][0]))
+            for message in messages:
+                content = message["content"]
+
+                while self.image_token in content:
+                    image_size = next(image_sizes)
+                    orig_height, orig_width = image_size
+                    image_seqlen = processor._get_number_of_features(orig_height, orig_width, height, width)
+                    if processor.vision_feature_select_strategy == "default":
+                        image_seqlen -= 1
+                    num_image_tokens += 1
+                    content = content.replace(self.image_token, "{{image}}" * image_seqlen, 1)
+
+                message["content"] = content.replace("{{image}}", self.image_token)
+
+        if "pixel_values_videos" in mm_inputs:
+            pixel_values_video = to_numpy_array(mm_inputs.get("pixel_values_videos")[0])
+            height, width = get_image_size(pixel_values_video[0])
+            num_frames = pixel_values_video.shape[0]  # frame dim is always after batch dim
+            image_seqlen = (height // processor.patch_size) * (width // processor.patch_size)
+            video_seqlen = image_seqlen // 4 * num_frames  # divide by 4 needed for avg pooling layer
+
+            for message in messages:
+                content = message["content"]
+                while self.video_token in content:
+                    num_video_tokens += 1
+                    content = content.replace(self.video_token, "{{video}}", 1)
+                message["content"] = content.replace("{{video}}", self.video_token * video_seqlen)
+
+        if len(images) != num_image_tokens:
+            raise ValueError("The number of images does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))
+
+        if len(videos) != num_video_tokens:
+            raise ValueError("The number of videos does not match the number of {} tokens".format(IMAGE_PLACEHOLDER))
+
+        return messages
+
+    @override
+    def get_mm_inputs(
+        self,
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        imglens: Sequence[int],
+        vidlens: Sequence[int],
+        seqlens: Sequence[int],
+        processor: Optional["ProcessorMixin"],
+    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
+        self._validate_input(images, videos)
+        return self._get_mm_inputs(images, videos, processor)
+
+
 class PaliGemmaPlugin(BasePlugin):
     @override
     def process_messages(
```
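Both new plugins expand each placeholder into as many tokens as the vision tower will emit for that input, and the video branch derives its count straight from the patch grid: an h×w frame with patch size p yields (h // p) · (w // p) patches, and LLaVA-NeXT-Video's 2×2 average pooling divides that by 4 per frame. A small sketch of that arithmetic with example numbers (336×336 frames, patch size 14, 8 frames), chosen for illustration only:

```python
def video_token_count(height: int, width: int, patch_size: int, num_frames: int) -> int:
    """Tokens contributed by a video: per-frame patch grid, then 2x2 avg pooling (//4)."""
    patches_per_frame = (height // patch_size) * (width // patch_size)
    return patches_per_frame // 4 * num_frames


# Example: 336x336 frames, ViT patch size 14, 8 sampled frames.
# (336 // 14) ** 2 = 576 patches per frame; 576 // 4 = 144 tokens per frame.
print(video_token_count(336, 336, 14, 8))  # 1152
```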
```diff
@@ -417,11 +541,77 @@ class Qwen2vlPlugin(BasePlugin):
         return self._get_mm_inputs(images, videos, processor)
 
 
+class VideoLlavaPlugin(BasePlugin):
+    @override
+    def process_messages(
+        self,
+        messages: Sequence[Dict[str, str]],
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        processor: Optional["ProcessorMixin"],
+    ) -> List[Dict[str, str]]:
+        self._validate_input(images, videos)
+        num_image_tokens = 0
+        num_video_tokens = 0
+        messages = deepcopy(messages)
+        mm_inputs = self._get_mm_inputs(images, videos, processor)
+        num_frames = 0
+        exist_images = "pixel_values_images" in mm_inputs
+        exist_videos = "pixel_values_videos" in mm_inputs
+        if exist_videos or exist_images:
+            if exist_images:
+                height, width = get_image_size(to_numpy_array(mm_inputs.get("pixel_values_images")[0]))
+                num_frames = 1
+            if exist_videos:
+                pixel_values_video = to_numpy_array(mm_inputs.get("pixel_values_videos")[0])
+                height, width = get_image_size(pixel_values_video[0])
+                num_frames = pixel_values_video.shape[0]  # frame dim is always after batch dim
+            image_seqlen = (height // processor.patch_size) * (width // processor.patch_size) + 1
+            video_seqlen = image_seqlen * num_frames
+            if processor.vision_feature_select_strategy == "default":
+                image_seqlen -= 1
+            for message in messages:
+                content = message["content"]
+                while self.image_token in content:
+                    num_image_tokens += 1
+                    content = content.replace(self.image_token, "{{image}}", 1)
+                while self.video_token in content:
+                    num_video_tokens += 1
+                    content = content.replace(self.video_token, "{{video}}", 1)
+
+                content = content.replace("{{image}}", self.image_token * image_seqlen)
+                message["content"] = content.replace("{{video}}", self.video_token * video_seqlen)
+
+        if len(images) != num_image_tokens:
+            raise ValueError("The number of images does not match the number of {} tokens".format(self.image_token))
+
+        if len(videos) != num_video_tokens:
+            raise ValueError("The number of videos does not match the number of {} tokens".format(self.video_token))
+
+        return messages
+
+    @override
+    def get_mm_inputs(
+        self,
+        images: Sequence["ImageInput"],
+        videos: Sequence["VideoInput"],
+        imglens: Sequence[int],
+        vidlens: Sequence[int],
+        seqlens: Sequence[int],
+        processor: Optional["ProcessorMixin"],
+    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
+        self._validate_input(images, videos)
+        return self._get_mm_inputs(images, videos, processor)
+
+
 PLUGINS = {
     "base": BasePlugin,
     "llava": LlavaPlugin,
+    "llava_next": LlavaNextPlugin,
+    "llava_next_video": LlavaNextVideoPlugin,
     "paligemma": PaliGemmaPlugin,
     "qwen2_vl": Qwen2vlPlugin,
+    "video_llava": VideoLlavaPlugin,
 }
```
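A new plugin only needs an entry in the `PLUGINS` name-to-class registry to become reachable from the templates through `get_mm_plugin`. A toy replica of that registry-plus-factory pattern, with simplified plugin classes rather than the project's actual ones:

```python
from typing import Dict, Type


class BasePlugin:
    def __init__(self, image_token: str = "<image>", video_token: str = "<video>") -> None:
        self.image_token = image_token
        self.video_token = video_token


class LlavaNextVideoPlugin(BasePlugin):
    pass


# name -> class registry, as in mm_plugin.PLUGINS
PLUGINS: Dict[str, Type[BasePlugin]] = {
    "base": BasePlugin,
    "llava_next_video": LlavaNextVideoPlugin,
}


def get_mm_plugin(name: str, **kwargs) -> BasePlugin:
    """Instantiate the multimodal plugin registered under `name`."""
    if name not in PLUGINS:
        raise ValueError(f"Multimodal plugin `{name}` not found.")
    return PLUGINS[name](**kwargs)


plugin = get_mm_plugin("llava_next_video", image_token="<image>", video_token="<video>")
print(type(plugin).__name__)  # LlavaNextVideoPlugin
```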
|
@ -644,6 +644,14 @@ _register_template(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
_register_template(
|
||||||
|
name="exaone",
|
||||||
|
format_user=StringFormatter(slots=["[|user|]{{content}}\n[|assistant|]"]),
|
||||||
|
format_system=StringFormatter(slots=["[|system|]{{content}}[|endofturn|]\n"]),
|
||||||
|
format_separator=EmptyFormatter(slots=["\n"]),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
_register_template(
|
_register_template(
|
||||||
name="falcon",
|
name="falcon",
|
||||||
format_user=StringFormatter(slots=["User: {{content}}\nFalcon:"]),
|
format_user=StringFormatter(slots=["User: {{content}}\nFalcon:"]),
|
||||||
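The EXAONE slots make the rendered prompt easy to read off: the system turn is closed with `[|endofturn|]`, and each user turn ends by opening an `[|assistant|]` turn for the model to complete. A quick sketch of one rendered exchange, using plain `str.format` in place of the project's `StringFormatter`; the message contents are made up:

```python
# Simplified rendering of the exaone slots for one system + user exchange.
SYSTEM_SLOT = "[|system|]{content}[|endofturn|]\n"
USER_SLOT = "[|user|]{content}\n[|assistant|]"

prompt = SYSTEM_SLOT.format(content="You are a helpful assistant.")  # example system message
prompt += USER_SLOT.format(content="Hello!")
print(prompt)
# [|system|]You are a helpful assistant.[|endofturn|]
# [|user|]Hello!
# [|assistant|]
```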
```diff
@@ -760,6 +768,107 @@ _register_template(
 )
 
 
+_register_template(
+    name="llava_next",
+    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
+    default_system=(
+        "A chat between a curious user and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the user's questions."
+    ),
+    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
+)
+
+
+_register_template(
+    name="llava_next_llama3",
+    format_user=StringFormatter(
+        slots=[
+            (
+                "<|start_header_id|>user<|end_header_id|>\n\n{{content}}<|eot_id|>"
+                "<|start_header_id|>assistant<|end_header_id|>\n\n"
+            )
+        ]
+    ),
+    format_system=StringFormatter(slots=["<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"]),
+    format_observation=StringFormatter(
+        slots=[
+            (
+                "<|start_header_id|>tool<|end_header_id|>\n\n{{content}}<|eot_id|>"
+                "<|start_header_id|>assistant<|end_header_id|>\n\n"
+            )
+        ]
+    ),
+    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
+    stop_words=["<|eot_id|>"],
+    replace_eos=True,
+    replace_jinja_template=False,
+    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
+)
+
+
+_register_template(
+    name="llava_next_mistral",
+    format_user=StringFormatter(slots=["[INST] {{content}} [/INST]"]),
+    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
+    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
+)
+
+
+_register_template(
+    name="llava_next_qwen",
+    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
+    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
+    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    default_system="You are a helpful assistant.",
+    stop_words=["<|im_end|>"],
+    replace_eos=True,
+    replace_jinja_template=False,
+    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
+)
+
+
+_register_template(
+    name="llava_next_yi",
+    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
+    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    stop_words=["<|im_end|>"],
+    replace_eos=True,
+    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
+)
+
+
+_register_template(
+    name="llava_next_video",
+    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
+    default_system=(
+        "A chat between a curious user and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the user's questions."
+    ),
+    mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
+)
+
+
+_register_template(
+    name="llava_next_video_mistral",
+    format_user=StringFormatter(slots=["[INST] {{content}} [/INST]"]),
+    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
+    mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
+)
+
+
+_register_template(
+    name="llava_next_video_yi",
+    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
+    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    stop_words=["<|im_end|>"],
+    replace_eos=True,
+    mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
+)
+
+
 _register_template(
     name="mistral",
     format_user=StringFormatter(slots=["[INST] {{content}} [/INST]"]),
```
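Each `llava_next_*` variant reuses the chat scaffolding of its language backbone (Vicuna, Llama 3, Mistral, Qwen, or Yi) and differs only in which multimodal plugin and tokens it wires in. A sketch of how the Vicuna-style `llava_next` user slot composes with the `<image>` placeholder the plugin later expands; plain string substitution stands in for `StringFormatter`:

```python
# The plugin leaves one <image> placeholder in the user message; the template
# then wraps the turn in the backbone's chat scaffolding.
user_slot = "USER: {content} ASSISTANT:"
content = "<image>\nWhat is shown in this picture?"
print(user_slot.format(content=content))
# USER: <image>
# What is shown in this picture? ASSISTANT:
```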
```diff
@@ -906,6 +1015,17 @@ _register_template(
 )
 
 
+_register_template(
+    name="video_llava",
+    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
+    default_system=(
+        "A chat between a curious user and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the user's questions."
+    ),
+    mm_plugin=get_mm_plugin(name="video_llava", image_token="<image>", video_token="<video>"),
+)
+
+
 _register_template(
     name="xuanyuan",
     format_user=StringFormatter(slots=["Human: {{content}} Assistant:"]),
```
src/llamafactory/extras/constants.py

```diff
@@ -114,17 +114,12 @@ def register_model_group(
     template: Optional[str] = None,
     vision: bool = False,
 ) -> None:
-    prefix = None
     for name, path in models.items():
-        if prefix is None:
-            prefix = name.split("-")[0]
-        else:
-            assert prefix == name.split("-")[0], "prefix should be identical."
         SUPPORTED_MODELS[name] = path
-    if template is not None:
-        DEFAULT_TEMPLATE[prefix] = template
-    if vision:
-        VISION_MODELS.add(prefix)
+        if template is not None and any(suffix in name for suffix in ("-Chat", "-Instruct")):
+            DEFAULT_TEMPLATE[name] = template
+        if vision:
+            VISION_MODELS.add(name)
 
 
 register_model_group(
```
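`register_model_group` now records the template and vision flag per full model name instead of per shared name prefix, and a default template is attached only to `-Chat`/`-Instruct` variants, since base models can be tuned with any template. A self-contained replica of the new behavior against toy registries; the model names are real but the registries here are local stand-ins:

```python
from typing import Dict, Optional

SUPPORTED_MODELS: Dict[str, str] = {}
DEFAULT_TEMPLATE: Dict[str, str] = {}
VISION_MODELS = set()


def register_model_group(models: Dict[str, str], template: Optional[str] = None, vision: bool = False) -> None:
    for name, path in models.items():
        SUPPORTED_MODELS[name] = path
        # Only chat/instruct variants get a default template; base models do not.
        if template is not None and any(suffix in name for suffix in ("-Chat", "-Instruct")):
            DEFAULT_TEMPLATE[name] = template
        if vision:
            VISION_MODELS.add(name)


register_model_group(
    models={
        "Llama-3.2-1B": "meta-llama/Llama-3.2-1B",
        "Llama-3.2-1B-Instruct": "meta-llama/Llama-3.2-1B-Instruct",
    },
    template="llama3",
)
print(DEFAULT_TEMPLATE)  # {'Llama-3.2-1B-Instruct': 'llama3'} (base model excluded)
```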
```diff
@@ -274,27 +269,27 @@ register_model_group(
 
 register_model_group(
     models={
-        "ChineseLLaMA2-1.3B": {
+        "Chinese-Llama-2-1.3B": {
             DownloadSource.DEFAULT: "hfl/chinese-llama-2-1.3b",
             DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-1.3b",
         },
-        "ChineseLLaMA2-7B": {
+        "Chinese-Llama-2-7B": {
             DownloadSource.DEFAULT: "hfl/chinese-llama-2-7b",
             DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-7b",
         },
-        "ChineseLLaMA2-13B": {
+        "Chinese-Llama-2-13B": {
             DownloadSource.DEFAULT: "hfl/chinese-llama-2-13b",
             DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-13b",
         },
-        "ChineseLLaMA2-1.3B-Chat": {
+        "Chinese-Alpaca-2-1.3B-Chat": {
             DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-1.3b",
             DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-1.3b",
         },
-        "ChineseLLaMA2-7B-Chat": {
+        "Chinese-Alpaca-2-7B-Chat": {
             DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-7b",
             DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-7b",
         },
-        "ChineseLLaMA2-13B-Chat": {
+        "Chinese-Alpaca-2-13B-Chat": {
             DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-13b",
             DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-13b",
         },
```
```diff
@@ -450,25 +445,25 @@ register_model_group(
 
 register_model_group(
     models={
-        "DeepSeekCoder-6.7B-Base": {
+        "DeepSeek-Coder-6.7B-Base": {
             DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-base",
             DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-base",
         },
-        "DeepSeekCoder-7B-Base": {
+        "DeepSeek-Coder-7B-Base": {
             DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-base-v1.5",
         },
-        "DeepSeekCoder-33B-Base": {
+        "DeepSeek-Coder-33B-Base": {
             DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-base",
             DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-base",
         },
-        "DeepSeekCoder-6.7B-Instruct": {
+        "DeepSeek-Coder-6.7B-Instruct": {
             DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-instruct",
             DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-instruct",
         },
-        "DeepSeekCoder-7B-Instruct": {
+        "DeepSeek-Coder-7B-Instruct": {
             DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-instruct-v1.5",
         },
-        "DeepSeekCoder-33B-Instruct": {
+        "DeepSeek-Coder-33B-Instruct": {
             DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-instruct",
             DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-instruct",
         },
```
```diff
@@ -477,6 +472,16 @@ register_model_group(
 )
 
 
+register_model_group(
+    models={
+        "EXAONE-3.0-7.8B-Instruct": {
+            DownloadSource.DEFAULT: "LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct",
+        },
+    },
+    template="exaone",
+)
+
+
 register_model_group(
     models={
         "Falcon-7B": {
```
```diff
@@ -624,13 +629,6 @@ register_model_group(
             DownloadSource.DEFAULT: "internlm/internlm2-chat-20b",
             DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-chat-20b",
         },
-    },
-    template="intern2",
-)
-
-
-register_model_group(
-    models={
         "InternLM2.5-1.8B": {
             DownloadSource.DEFAULT: "internlm/internlm2_5-1_8b",
             DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2_5-1_8b",
```
```diff
@@ -686,19 +684,19 @@ register_model_group(
 
 register_model_group(
     models={
-        "LLaMA-7B": {
+        "Llama-7B": {
             DownloadSource.DEFAULT: "huggyllama/llama-7b",
             DownloadSource.MODELSCOPE: "skyline2006/llama-7b",
         },
-        "LLaMA-13B": {
+        "Llama-13B": {
             DownloadSource.DEFAULT: "huggyllama/llama-13b",
             DownloadSource.MODELSCOPE: "skyline2006/llama-13b",
         },
-        "LLaMA-30B": {
+        "Llama-30B": {
             DownloadSource.DEFAULT: "huggyllama/llama-30b",
             DownloadSource.MODELSCOPE: "skyline2006/llama-30b",
         },
-        "LLaMA-65B": {
+        "Llama-65B": {
             DownloadSource.DEFAULT: "huggyllama/llama-65b",
             DownloadSource.MODELSCOPE: "skyline2006/llama-65b",
         },
```
```diff
@@ -708,27 +706,27 @@ register_model_group(
 
 register_model_group(
     models={
-        "LLaMA2-7B": {
+        "Llama-2-7B": {
             DownloadSource.DEFAULT: "meta-llama/Llama-2-7b-hf",
             DownloadSource.MODELSCOPE: "modelscope/Llama-2-7b-ms",
         },
-        "LLaMA2-13B": {
+        "Llama-2-13B": {
             DownloadSource.DEFAULT: "meta-llama/Llama-2-13b-hf",
             DownloadSource.MODELSCOPE: "modelscope/Llama-2-13b-ms",
         },
-        "LLaMA2-70B": {
+        "Llama-2-70B": {
             DownloadSource.DEFAULT: "meta-llama/Llama-2-70b-hf",
             DownloadSource.MODELSCOPE: "modelscope/Llama-2-70b-ms",
         },
-        "LLaMA2-7B-Chat": {
+        "Llama-2-7B-Chat": {
             DownloadSource.DEFAULT: "meta-llama/Llama-2-7b-chat-hf",
             DownloadSource.MODELSCOPE: "modelscope/Llama-2-7b-chat-ms",
         },
-        "LLaMA2-13B-Chat": {
+        "Llama-2-13B-Chat": {
             DownloadSource.DEFAULT: "meta-llama/Llama-2-13b-chat-hf",
             DownloadSource.MODELSCOPE: "modelscope/Llama-2-13b-chat-ms",
         },
-        "LLaMA2-70B-Chat": {
+        "Llama-2-70B-Chat": {
             DownloadSource.DEFAULT: "meta-llama/Llama-2-70b-chat-hf",
             DownloadSource.MODELSCOPE: "modelscope/Llama-2-70b-chat-ms",
         },
```
```diff
@@ -739,60 +737,77 @@ register_model_group(
 
 register_model_group(
     models={
-        "LLaMA3-8B": {
+        "Llama-3-8B": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-8B",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-8B",
         },
-        "LLaMA3-70B": {
+        "Llama-3-70B": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-70B",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-70B",
         },
-        "LLaMA3-8B-Instruct": {
+        "Llama-3-8B-Instruct": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-8B-Instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-8B-Instruct",
         },
-        "LLaMA3-70B-Instruct": {
+        "Llama-3-70B-Instruct": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-70B-Instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-70B-Instruct",
         },
-        "LLaMA3-8B-Chinese-Chat": {
+        "Llama-3-8B-Chinese-Chat": {
             DownloadSource.DEFAULT: "shenzhi-wang/Llama3-8B-Chinese-Chat",
             DownloadSource.MODELSCOPE: "LLM-Research/Llama3-8B-Chinese-Chat",
         },
-        "LLaMA3-70B-Chinese-Chat": {
+        "Llama-3-70B-Chinese-Chat": {
             DownloadSource.DEFAULT: "shenzhi-wang/Llama3-70B-Chinese-Chat",
         },
-    },
-    template="llama3",
-)
-
-
-register_model_group(
-    models={
-        "LLaMA3.1-8B": {
+        "Llama-3.1-8B": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-8B",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-8B",
         },
-        "LLaMA3.1-70B": {
+        "Llama-3.1-70B": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-70B",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-70B",
         },
-        "LLaMA3.1-405B": {
+        "Llama-3.1-405B": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-405B",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-405B",
         },
-        "LLaMA3.1-8B-Instruct": {
+        "Llama-3.1-8B-Instruct": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-8B-Instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-8B-Instruct",
         },
-        "LLaMA3.1-70B-Instruct": {
+        "Llama-3.1-70B-Instruct": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-70B-Instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-70B-Instruct",
         },
-        "LLaMA3.1-405B-Instruct": {
+        "Llama-3.1-405B-Instruct": {
             DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-405B-Instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-405B-Instruct",
         },
+        "Llama-3.1-8B-Chinese-Chat": {
+            DownloadSource.DEFAULT: "shenzhi-wang/Llama3.1-8B-Chinese-Chat",
+            DownloadSource.MODELSCOPE: "XD_AI/Llama3.1-8B-Chinese-Chat",
+        },
+        "Llama-3.1-70B-Chinese-Chat": {
+            DownloadSource.DEFAULT: "shenzhi-wang/Llama3.1-70B-Chinese-Chat",
+            DownloadSource.MODELSCOPE: "XD_AI/Llama3.1-70B-Chinese-Chat",
+        },
+        "Llama-3.2-1B": {
+            DownloadSource.DEFAULT: "meta-llama/Llama-3.2-1B",
+            DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-1B",
+        },
+        "Llama-3.2-3B": {
+            DownloadSource.DEFAULT: "meta-llama/Llama-3.2-3B",
+            DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-3B",
+        },
+        "Llama-3.2-1B-Instruct": {
+            DownloadSource.DEFAULT: "meta-llama/Llama-3.2-1B-Instruct",
+            DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-1B-Instruct",
+        },
+        "Llama-3.2-3B-Instruct": {
+            DownloadSource.DEFAULT: "meta-llama/Llama-3.2-3B-Instruct",
+            DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-3B-Instruct",
+        },
     },
     template="llama3",
 )
```
```diff
@@ -800,11 +815,13 @@ register_model_group(
 
 register_model_group(
     models={
-        "LLaVA1.5-7B-Chat": {
+        "LLaVA-1.5-7B-Chat": {
             DownloadSource.DEFAULT: "llava-hf/llava-1.5-7b-hf",
+            DownloadSource.MODELSCOPE: "swift/llava-1.5-7b-hf",
         },
-        "LLaVA1.5-13B-Chat": {
+        "LLaVA-1.5-13B-Chat": {
             DownloadSource.DEFAULT: "llava-hf/llava-1.5-13b-hf",
+            DownloadSource.MODELSCOPE: "swift/llava-1.5-13b-hf",
         },
     },
     template="llava",
```
```diff
@@ -812,6 +829,117 @@ register_model_group(
 )
 
 
+register_model_group(
+    models={
+        "LLaVA-NeXT-7B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-vicuna-7b-hf",
+            DownloadSource.MODELSCOPE: "swift/llava-v1.6-vicuna-7b-hf",
+        },
+        "LLaVA-NeXT-13B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-vicuna-13b-hf",
+            DownloadSource.MODELSCOPE: "swift/llava-v1.6-vicuna-13b-hf",
+        },
+    },
+    template="llava_next",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-Mistral-7B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-mistral-7b-hf",
+            DownloadSource.MODELSCOPE: "swift/llava-v1.6-mistral-7b-hf",
+        },
+    },
+    template="llava_next_mistral",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-Llama3-8B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llama3-llava-next-8b-hf",
+            DownloadSource.MODELSCOPE: "swift/llama3-llava-next-8b-hf",
+        },
+    },
+    template="llava_next_llama3",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-34B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llava-v1.6-34b-hf",
+            DownloadSource.MODELSCOPE: "LLM-Research/llava-v1.6-34b-hf",
+        },
+    },
+    template="llava_next_yi",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-72B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llava-next-72b-hf",
+            DownloadSource.MODELSCOPE: "AI-ModelScope/llava-next-72b-hf",
+        },
+        "LLaVA-NeXT-110B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/llava-next-110b-hf",
+            DownloadSource.MODELSCOPE: "AI-ModelScope/llava-next-110b-hf",
+        },
+    },
+    template="llava_next_qwen",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-Video-7B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-7B-hf",
+            DownloadSource.MODELSCOPE: "swift/LLaVA-NeXT-Video-7B-hf",
+        },
+        "LLaVA-NeXT-Video-7B-DPO-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-7B-DPO-hf",
+            DownloadSource.MODELSCOPE: "swift/LLaVA-NeXT-Video-7B-DPO-hf",
+        },
+    },
+    template="llava_next_video",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-Video-7B-32k-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-7B-32K-hf",
+            DownloadSource.MODELSCOPE: "swift/LLaVA-NeXT-Video-7B-32K-hf",
+        },
+    },
+    template="llava_next_video_mistral",
+    vision=True,
+)
+
+
+register_model_group(
+    models={
+        "LLaVA-NeXT-Video-34B-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-34B-hf",
+            DownloadSource.MODELSCOPE: "swift/LLaVA-NeXT-Video-34B-hf",
+        },
+        "LLaVA-NeXT-Video-34B-DPO-Chat": {
+            DownloadSource.DEFAULT: "llava-hf/LLaVA-NeXT-Video-34B-DPO-hf",
+        },
+    },
+    template="llava_next_video_yi",
+    vision=True,
+)
+
+
 register_model_group(
     models={
         "MiniCPM-2B-SFT-Chat": {
```
```diff
@@ -1005,27 +1133,27 @@ register_model_group(
 
 register_model_group(
     models={
-        "Phi3-4B-4k-Instruct": {
+        "Phi-3-4B-4k-Instruct": {
             DownloadSource.DEFAULT: "microsoft/Phi-3-mini-4k-instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-mini-4k-instruct",
         },
-        "Phi3-4B-128k-Instruct": {
+        "Phi-3-4B-128k-Instruct": {
             DownloadSource.DEFAULT: "microsoft/Phi-3-mini-128k-instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-mini-128k-instruct",
         },
-        "Phi3-7B-8k-Instruct": {
+        "Phi-3-7B-8k-Instruct": {
             DownloadSource.DEFAULT: "microsoft/Phi-3-small-8k-instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-small-8k-instruct",
         },
-        "Phi3-7B-128k-Instruct": {
+        "Phi-3-7B-128k-Instruct": {
             DownloadSource.DEFAULT: "microsoft/Phi-3-small-128k-instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-small-128k-instruct",
         },
-        "Phi3-14B-8k-Instruct": {
+        "Phi-3-14B-8k-Instruct": {
             DownloadSource.DEFAULT: "microsoft/Phi-3-medium-4k-instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-4k-instruct",
         },
-        "Phi3-14B-128k-Instruct": {
+        "Phi-3-14B-128k-Instruct": {
             DownloadSource.DEFAULT: "microsoft/Phi-3-medium-128k-instruct",
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-128k-instruct",
         },
```
```diff
@@ -1068,35 +1196,35 @@ register_model_group(
             DownloadSource.DEFAULT: "Qwen/Qwen-72B-Chat",
             DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat",
         },
-        "Qwen-1.8B-int8-Chat": {
+        "Qwen-1.8B-Chat-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen-1_8B-Chat-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen-1_8B-Chat-Int8",
         },
-        "Qwen-1.8B-int4-Chat": {
+        "Qwen-1.8B-Chat-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen-1_8B-Chat-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen-1_8B-Chat-Int4",
         },
-        "Qwen-7B-int8-Chat": {
+        "Qwen-7B-Chat-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen-7B-Chat-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen-7B-Chat-Int8",
         },
-        "Qwen-7B-int4-Chat": {
+        "Qwen-7B-Chat-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen-7B-Chat-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen-7B-Chat-Int4",
         },
-        "Qwen-14B-int8-Chat": {
+        "Qwen-14B-Chat-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen-14B-Chat-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen-14B-Chat-Int8",
         },
-        "Qwen-14B-int4-Chat": {
+        "Qwen-14B-Chat-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen-14B-Chat-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen-14B-Chat-Int4",
         },
-        "Qwen-72B-int8-Chat": {
+        "Qwen-72B-Chat-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen-72B-Chat-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat-Int8",
         },
-        "Qwen-72B-int4-Chat": {
+        "Qwen-72B-Chat-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen-72B-Chat-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat-Int4",
         },
```
@@ -1179,75 +1307,75 @@ register_model_group(
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B-Chat",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B-Chat",
         },
-        "Qwen1.5-0.5B-int8-Chat": {
+        "Qwen1.5-0.5B-Chat-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8",
         },
-        "Qwen1.5-0.5B-int4-Chat": {
+        "Qwen1.5-0.5B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat-AWQ",
         },
-        "Qwen1.5-1.8B-int8-Chat": {
+        "Qwen1.5-1.8B-Chat-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-1.8B-Chat-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-1.8B-Chat-GPTQ-Int8",
         },
-        "Qwen1.5-1.8B-int4-Chat": {
+        "Qwen1.5-1.8B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-1.8B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-1.8B-Chat-AWQ",
         },
-        "Qwen1.5-4B-int8-Chat": {
+        "Qwen1.5-4B-Chat-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-4B-Chat-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-4B-Chat-GPTQ-Int8",
         },
-        "Qwen1.5-4B-int4-Chat": {
+        "Qwen1.5-4B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-4B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-4B-Chat-AWQ",
         },
-        "Qwen1.5-7B-int8-Chat": {
+        "Qwen1.5-7B-Chat-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-7B-Chat-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-7B-Chat-GPTQ-Int8",
         },
-        "Qwen1.5-7B-int4-Chat": {
+        "Qwen1.5-7B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-7B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-7B-Chat-AWQ",
         },
-        "Qwen1.5-14B-int8-Chat": {
+        "Qwen1.5-14B-Chat-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-14B-Chat-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-14B-Chat-GPTQ-Int8",
         },
-        "Qwen1.5-14B-int4-Chat": {
+        "Qwen1.5-14B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-14B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-14B-Chat-AWQ",
         },
-        "Qwen1.5-32B-int4-Chat": {
+        "Qwen1.5-32B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-32B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-32B-Chat-AWQ",
         },
-        "Qwen1.5-72B-int8-Chat": {
+        "Qwen1.5-72B-Chat-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-72B-Chat-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-72B-Chat-GPTQ-Int8",
         },
-        "Qwen1.5-72B-int4-Chat": {
+        "Qwen1.5-72B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-72B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-72B-Chat-AWQ",
         },
-        "Qwen1.5-110B-int4-Chat": {
+        "Qwen1.5-110B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-110B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-110B-Chat-AWQ",
         },
-        "Qwen1.5-MoE-A2.7B-int4-Chat": {
+        "Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4",
         },
-        "Qwen1.5-Code-7B": {
+        "CodeQwen1.5-7B": {
             DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B",
             DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B",
         },
-        "Qwen1.5-Code-7B-Chat": {
+        "CodeQwen1.5-7B-Chat": {
             DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat",
             DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat",
         },
-        "Qwen1.5-Code-7B-int4-Chat": {
+        "CodeQwen1.5-7B-Chat-AWQ": {
             DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat-AWQ",
             DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat-AWQ",
         },
@@ -1568,51 +1696,51 @@ register_model_group(

 register_model_group(
     models={
-        "Qwen2VL-2B-Instruct": {
+        "Qwen2-VL-2B-Instruct": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct",
         },
-        "Qwen2VL-7B-Instruct": {
+        "Qwen2-VL-7B-Instruct": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct",
         },
-        "Qwen2VL-72B-Instruct": {
+        "Qwen2-VL-72B-Instruct": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct",
         },
-        "Qwen2VL-2B-Instruct-GPTQ-Int8": {
+        "Qwen2-VL-2B-Instruct-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8",
         },
-        "Qwen2VL-2B-Instruct-GPTQ-Int4": {
+        "Qwen2-VL-2B-Instruct-GPTQ-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4",
         },
-        "Qwen2VL-2B-Instruct-AWQ": {
+        "Qwen2-VL-2B-Instruct-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct-AWQ",
         },
-        "Qwen2VL-7B-Instruct-GPTQ-Int8": {
+        "Qwen2-VL-7B-Instruct-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8",
         },
-        "Qwen2VL-7B-Instruct-GPTQ-Int4": {
+        "Qwen2-VL-7B-Instruct-GPTQ-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4",
         },
-        "Qwen2VL-7B-Instruct-AWQ": {
+        "Qwen2-VL-7B-Instruct-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct-AWQ",
         },
-        "Qwen2VL-72B-Instruct-GPTQ-Int8": {
+        "Qwen2-VL-72B-Instruct-GPTQ-Int8": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int8",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct-GPTQ-Int8",
         },
-        "Qwen2VL-72B-Instruct-GPTQ-Int4": {
+        "Qwen2-VL-72B-Instruct-GPTQ-Int4": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int4",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct-GPTQ-Int4",
         },
-        "Qwen2VL-72B-Instruct-AWQ": {
+        "Qwen2-VL-72B-Instruct-AWQ": {
             DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct-AWQ",
             DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct-AWQ",
         },
@@ -1689,11 +1817,11 @@ register_model_group(

 register_model_group(
     models={
-        "Vicuna1.5-7B-Chat": {
+        "Vicuna-v1.5-7B-Chat": {
             DownloadSource.DEFAULT: "lmsys/vicuna-7b-v1.5",
             DownloadSource.MODELSCOPE: "Xorbits/vicuna-7b-v1.5",
         },
-        "Vicuna1.5-13B-Chat": {
+        "Vicuna-v1.5-13B-Chat": {
             DownloadSource.DEFAULT: "lmsys/vicuna-13b-v1.5",
             DownloadSource.MODELSCOPE: "Xorbits/vicuna-13b-v1.5",
         },
@@ -1702,6 +1830,17 @@ register_model_group(
 )


+register_model_group(
+    models={
+        "Video-LLaVA-7B-Chat": {
+            DownloadSource.DEFAULT: "LanguageBind/Video-LLaVA-7B-hf",
+        },
+    },
+    template="video_llava",
+    vision=True,
+)
+
+
 register_model_group(
     models={
         "XuanYuan-6B": {
@@ -1712,7 +1851,7 @@ register_model_group(
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B",
         },
-        "XuanYuan-2-70B": {
+        "XuanYuan2-70B": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B",
         },
@@ -1724,31 +1863,31 @@ register_model_group(
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B-Chat",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B-Chat",
         },
-        "XuanYuan-2-70B-Chat": {
+        "XuanYuan2-70B-Chat": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B-Chat",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B-Chat",
         },
-        "XuanYuan-6B-int8-Chat": {
+        "XuanYuan-6B-Chat-8bit": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-6B-Chat-8bit",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-6B-Chat-8bit",
         },
-        "XuanYuan-6B-int4-Chat": {
+        "XuanYuan-6B-Chat-4bit": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-6B-Chat-4bit",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-6B-Chat-4bit",
         },
-        "XuanYuan-70B-int8-Chat": {
+        "XuanYuan-70B-Chat-8bit": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B-Chat-8bit",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B-Chat-8bit",
         },
-        "XuanYuan-70B-int4-Chat": {
+        "XuanYuan-70B-Chat-4bit": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B-Chat-4bit",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B-Chat-4bit",
         },
-        "XuanYuan-2-70B-int8-Chat": {
+        "XuanYuan2-70B-Chat-8bit": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B-Chat-8bit",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B-Chat-8bit",
         },
-        "XuanYuan-2-70B-int4-Chat": {
+        "XuanYuan2-70B-Chat-4bit": {
             DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B-Chat-4bit",
             DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B-Chat-4bit",
         },
@@ -1853,19 +1992,19 @@ register_model_group(
             DownloadSource.DEFAULT: "01-ai/Yi-34B-Chat",
             DownloadSource.MODELSCOPE: "01ai/Yi-34B-Chat",
         },
-        "Yi-6B-int8-Chat": {
+        "Yi-6B-Chat-8bits": {
             DownloadSource.DEFAULT: "01-ai/Yi-6B-Chat-8bits",
             DownloadSource.MODELSCOPE: "01ai/Yi-6B-Chat-8bits",
         },
-        "Yi-6B-int4-Chat": {
+        "Yi-6B-Chat-4bits": {
             DownloadSource.DEFAULT: "01-ai/Yi-6B-Chat-4bits",
             DownloadSource.MODELSCOPE: "01ai/Yi-6B-Chat-4bits",
         },
-        "Yi-34B-int8-Chat": {
+        "Yi-34B-Chat-8bits": {
             DownloadSource.DEFAULT: "01-ai/Yi-34B-Chat-8bits",
             DownloadSource.MODELSCOPE: "01ai/Yi-34B-Chat-8bits",
         },
-        "Yi-34B-int4-Chat": {
+        "Yi-34B-Chat-4bits": {
             DownloadSource.DEFAULT: "01-ai/Yi-34B-Chat-4bits",
             DownloadSource.MODELSCOPE: "01ai/Yi-34B-Chat-4bits",
         },
@@ -1916,10 +2055,10 @@ register_model_group(

 register_model_group(
     models={
-        "YiVL-6B-Chat": {
+        "Yi-VL-6B-Chat": {
             DownloadSource.DEFAULT: "BUAADreamer/Yi-VL-6B-hf",
         },
-        "YiVL-34B-Chat": {
+        "Yi-VL-34B-Chat": {
             DownloadSource.DEFAULT: "BUAADreamer/Yi-VL-34B-hf",
         },
     },
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from dataclasses import asdict, dataclass, field, fields
+from dataclasses import dataclass, field, fields
 from typing import Any, Dict, Literal, Optional, Union

 import torch
@@ -308,20 +308,18 @@ class ModelArguments(QuantizationArguments, ProcessorArguments, ExportArguments,
         if self.export_quantization_bit is not None and self.export_quantization_dataset is None:
             raise ValueError("Quantization dataset is necessary for exporting.")

-    def to_dict(self) -> Dict[str, Any]:
-        return asdict(self)
-
     @classmethod
-    def copyfrom(cls, old_arg: "Self", **kwargs) -> "Self":
-        arg_dict = old_arg.to_dict()
-        arg_dict.update(**kwargs)
-        for attr in fields(cls):
-            if not attr.init:
-                arg_dict.pop(attr.name)
+    def copyfrom(cls, source: "Self", **kwargs) -> "Self":
+        init_args, lazy_args = {}, {}
+        for attr in fields(source):
+            if attr.init:
+                init_args[attr.name] = getattr(source, attr.name)
+            else:
+                lazy_args[attr.name] = getattr(source, attr.name)

-        new_arg = cls(**arg_dict)
-        new_arg.compute_dtype = old_arg.compute_dtype
-        new_arg.device_map = old_arg.device_map
-        new_arg.model_max_length = old_arg.model_max_length
-        new_arg.block_diag_attn = old_arg.block_diag_attn
-        return new_arg
+        init_args.update(kwargs)
+        result = cls(**init_args)
+        for name, value in lazy_args.items():
+            setattr(result, name, value)
+
+        return result
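The rewritten `copyfrom` avoids the old `to_dict`/`asdict` round trip, which recursively converts nested values, and instead walks the dataclass fields directly: `init=True` fields go through the constructor, `init=False` fields are re-attached afterwards, and no hard-coded attribute list is needed. A minimal sketch of the pattern on a toy dataclass (the class and fields below are illustrative, not the real `ModelArguments`):

```python
# Minimal sketch of the copyfrom() pattern, shown on a toy dataclass.
from dataclasses import dataclass, field, fields


@dataclass
class Args:
    model_name: str = "demo"
    batch_size: int = 8
    compute_dtype: object = field(init=False, default=None)  # set after __init__

    @classmethod
    def copyfrom(cls, source: "Args", **kwargs) -> "Args":
        init_args, lazy_args = {}, {}
        for attr in fields(source):
            # init=False fields cannot be passed to the constructor,
            # so they are copied onto the new instance afterwards.
            if attr.init:
                init_args[attr.name] = getattr(source, attr.name)
            else:
                lazy_args[attr.name] = getattr(source, attr.name)

        init_args.update(kwargs)  # explicit overrides win over copied values
        result = cls(**init_args)
        for name, value in lazy_args.items():
            setattr(result, name, value)

        return result


old = Args(batch_size=16)
old.compute_dtype = "bf16"
new = Args.copyfrom(old, model_name="export")
assert new.batch_size == 16 and new.compute_dtype == "bf16"
```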
@@ -57,7 +57,7 @@ def _parse_args(parser: "HfArgumentParser", args: Optional[Dict[str, Any]] = Non
     if args is not None:
         return parser.parse_dict(args)

-    if len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
+    if len(sys.argv) == 2 and (sys.argv[1].endswith(".yaml") or sys.argv[1].endswith(".yml")):
         return parser.parse_yaml_file(os.path.abspath(sys.argv[1]))

     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
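The parser change is small: a single config-file argument is routed by suffix, and the `.yml` spelling is now accepted alongside `.yaml`. A sketch of the same dispatch, assuming a standard `HfArgumentParser`; the tuple form of `str.endswith` is an equivalent, tighter spelling:

```python
# Sketch of the suffix dispatch used by _parse_args (simplified).
import os
import sys

from transformers import HfArgumentParser


def parse_config(parser: HfArgumentParser):
    if len(sys.argv) == 2 and sys.argv[1].endswith((".yaml", ".yml")):
        return parser.parse_yaml_file(os.path.abspath(sys.argv[1]))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        return parser.parse_json_file(os.path.abspath(sys.argv[1]))

    return parser.parse_args_into_dataclasses()
```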
@@ -123,7 +123,7 @@ def _check_extra_dependencies(
         require_version("mixture-of-depth>=1.1.6", "To fix: pip install mixture-of-depth>=1.1.6")

     if model_args.infer_backend == "vllm":
-        require_version("vllm>=0.4.3,<=0.6.0", "To fix: pip install vllm>=0.4.3,<=0.6.0")
+        require_version("vllm>=0.4.3,<=0.6.2", "To fix: pip install vllm>=0.4.3,<=0.6.2")

     if finetuning_args.use_galore:
         require_version("galore_torch", "To fix: pip install galore_torch")
@@ -21,12 +21,12 @@ from trl import AutoModelForCausalLMWithValueHead
 from ..extras.logging import get_logger
 from ..extras.misc import count_parameters, skip_check_imports, try_download_model_from_ms
 from .adapter import init_adapter
+from .model_utils.liger_kernel import apply_liger_kernel
 from .model_utils.misc import register_autoclass
 from .model_utils.mod import convert_pretrained_model_to_mod, load_mod_pretrained_model
 from .model_utils.unsloth import load_unsloth_pretrained_model
 from .model_utils.valuehead import load_valuehead_params
-from .model_utils.visual import get_image_seqlen
-from .patcher import patch_config, patch_model, patch_tokenizer, patch_valuehead_model
+from .patcher import patch_config, patch_model, patch_processor, patch_tokenizer, patch_valuehead_model


 if TYPE_CHECKING:
@@ -61,7 +61,7 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]:

 def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
     r"""
-    Loads pretrained tokenizer.
+    Loads pretrained tokenizer and optionally loads processor.

     Note: including inplace operation of model_args.
     """
@@ -96,15 +96,9 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
         logger.warning("New tokens have been added, changed `resize_vocab` to True.")

     patch_tokenizer(tokenizer)

     try:
         processor = AutoProcessor.from_pretrained(model_args.model_name_or_path, **init_kwargs)
-        setattr(processor, "tokenizer", tokenizer)
-        setattr(processor, "image_seqlen", get_image_seqlen(config))
-        setattr(processor, "image_resolution", model_args.image_resolution)
-        setattr(processor, "video_resolution", model_args.video_resolution)
-        setattr(processor, "video_fps", model_args.video_fps)
-        setattr(processor, "video_maxlen", model_args.video_maxlen)
+        patch_processor(processor, config, tokenizer, model_args)
     except Exception as e:
         logger.warning("Failed to load processor. Error: {}".format(e))
         processor = None
@@ -138,6 +132,7 @@ def load_model(
     init_kwargs = _get_init_kwargs(model_args)
     config = load_config(model_args)
     patch_config(config, tokenizer, model_args, init_kwargs, is_trainable)
+    apply_liger_kernel(config, model_args, is_trainable, require_logits=(finetuning_args.stage not in ["pt", "sft"]))

     model = None
     lazy_load = False
@@ -158,7 +153,6 @@ def load_model(
             load_class = AutoModelForVision2Seq
         else:
             load_class = AutoModelForCausalLM

         if model_args.train_from_scratch:
             model = load_class.from_config(config)
         else:
@@ -37,10 +37,11 @@ def configure_attn_implementation(
         if is_flash_attn_2_available():
             require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
             require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
-            logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
-            model_args.flash_attn = "fa2"
+            if model_args.flash_attn != "fa2":
+                logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
+                model_args.flash_attn = "fa2"
         else:
-            logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
+            logger.warning("FlashAttention-2 is not installed, use eager attention.")
             model_args.flash_attn = "disabled"
     elif model_args.flash_attn == "sdpa":
         logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import inspect
 from typing import TYPE_CHECKING

 from ...extras.logging import get_logger
@@ -26,7 +27,12 @@ if TYPE_CHECKING:
 logger = get_logger(__name__)


-def configure_liger_kernel(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
+def apply_liger_kernel(
+    config: "PretrainedConfig",
+    model_args: "ModelArguments",
+    is_trainable: bool,
+    require_logits: bool,
+) -> None:
     if not is_trainable or not model_args.enable_liger_kernel:
         return
@@ -51,5 +57,11 @@
         logger.warning("Current model does not support liger kernel.")
         return

-    apply_liger_kernel()
+    if require_logits and "fused_linear_cross_entropy" in inspect.signature(apply_liger_kernel).parameters:
+        logger.info("Current training stage does not support chunked cross entropy.")
+        kwargs = {"fused_linear_cross_entropy": False}
+    else:
+        kwargs = {}
+
+    apply_liger_kernel(**kwargs)
     logger.info("Liger kernel has been applied to the model.")
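The guard above only passes `fused_linear_cross_entropy` when the installed liger-kernel function actually accepts it, so older library versions keep working. A self-contained sketch of that signature-probing idiom (the functions below are stand-ins, not the liger-kernel API):

```python
# Sketch of the feature-detection idiom: pass a keyword argument only if the
# callee's signature accepts it (old_kernel/new_kernel are illustrative).
import inspect


def maybe_call(fn, **optional_kwargs):
    accepted = inspect.signature(fn).parameters
    kwargs = {k: v for k, v in optional_kwargs.items() if k in accepted}
    return fn(**kwargs)


def old_kernel():  # an older version without the new flag
    return "plain"


def new_kernel(fused_linear_cross_entropy: bool = True):
    return "fused" if fused_linear_cross_entropy else "plain"


assert maybe_call(old_kernel, fused_linear_cross_entropy=False) == "plain"
assert maybe_call(new_kernel, fused_linear_cross_entropy=False) == "plain"
```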
@@ -34,7 +34,7 @@ def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool)
         forbidden_modules.add("output_layer")
     elif model_type == "internlm2":
         forbidden_modules.add("output")
-    elif model_type in ["llava", "paligemma"]:
+    elif model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
         forbidden_modules.add("multi_modal_projector")
     elif model_type == "qwen2_vl":
         forbidden_modules.add("merger")
@@ -92,7 +92,7 @@ def autocast_projector_dtype(model: "PreTrainedModel", model_args: "ModelArgumen

     if getattr(model, "quantization_method", None):
         model_type = getattr(model.config, "model_type", None)
-        if model_type in ["llava", "paligemma"]:
+        if model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
             mm_projector: "torch.nn.Module" = getattr(model, "multi_modal_projector")
         elif model_type == "qwen2_vl":
             mm_projector: "torch.nn.Module" = getattr(getattr(model, "visual"), "merger")
@@ -108,7 +108,13 @@ def configure_visual_model(config: "PretrainedConfig") -> None:
     Patches VLMs before loading them.
     """
     model_type = getattr(config, "model_type", None)
-    if model_type == "llava":  # required for ds zero3 and valuehead models
+    if model_type in [
+        "llava",
+        "llava_next",
+        "llava_next_video",
+        "paligemma",
+        "video_llava",
+    ]:  # required for ds zero3 and valuehead models
         setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))

     if getattr(config, "is_yi_vl_derived_model", None):
@@ -122,7 +128,7 @@ def get_forbidden_modules(config: "PretrainedConfig", finetuning_args: "Finetuni
     """
     model_type = getattr(config, "model_type", None)
     forbidden_modules = set()
-    if model_type in ["llava", "paligemma"]:
+    if model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
         if finetuning_args.freeze_vision_tower:
             forbidden_modules.add("vision_tower")

@@ -150,12 +156,28 @@ def get_image_seqlen(config: "PretrainedConfig") -> int:
         image_seqlen += 1
     elif model_type == "paligemma":
         image_seqlen = config.vision_config.num_image_tokens
-    elif model_type == "qwen2_vl":  # variable length
+    else:
         image_seqlen = -1

     return image_seqlen


+def get_patch_size(config: "PretrainedConfig") -> int:
+    r"""
+    Computes the patch size of the vit.
+    """
+    patch_size = getattr(config.vision_config, "patch_size", -1)
+    return patch_size
+
+
+def get_vision_feature_select_strategy(config: "PretrainedConfig") -> int:
+    r"""
+    Get the vision_feature_select_strategy.
+    """
+    vision_feature_select_strategy = getattr(config, "vision_feature_select_strategy", "default")
+    return vision_feature_select_strategy
+
+
 def patch_target_modules(
     config: "PretrainedConfig", finetuning_args: "FinetuningArguments", target_modules: Sequence[str]
 ) -> Union[str, List[str]]:
@@ -164,7 +186,7 @@ def patch_target_modules(
     """
     model_type = getattr(config, "model_type", None)
     if finetuning_args.freeze_vision_tower:
-        if model_type in ["llava", "paligemma"]:
+        if model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
             return "^(?!.*vision_tower).*(?:{}).*".format("|".join(target_modules))
         elif model_type == "qwen2_vl":
             return "^(?!.*visual).*(?:{}).*".format("|".join(target_modules))
@@ -27,18 +27,23 @@ from ..extras.misc import infer_optim_dtype
 from .model_utils.attention import configure_attn_implementation, print_attn_implementation
 from .model_utils.checkpointing import prepare_model_for_training
 from .model_utils.embedding import resize_embedding_layer
-from .model_utils.liger_kernel import configure_liger_kernel
 from .model_utils.longlora import configure_longlora
 from .model_utils.moe import add_z3_leaf_module, configure_moe
 from .model_utils.packing import configure_packing
 from .model_utils.quantization import configure_quantization
 from .model_utils.rope import configure_rope
 from .model_utils.valuehead import prepare_valuehead_model
-from .model_utils.visual import autocast_projector_dtype, configure_visual_model
+from .model_utils.visual import (
+    autocast_projector_dtype,
+    configure_visual_model,
+    get_image_seqlen,
+    get_patch_size,
+    get_vision_feature_select_strategy,
+)


 if TYPE_CHECKING:
-    from transformers import PretrainedConfig, PreTrainedTokenizer
+    from transformers import PretrainedConfig, PreTrainedTokenizer, ProcessorMixin
     from trl import AutoModelForCausalLMWithValueHead

     from ..hparams import ModelArguments
@@ -52,6 +57,22 @@ def patch_tokenizer(tokenizer: "PreTrainedTokenizer") -> None:
         tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer)


+def patch_processor(
+    processor: "ProcessorMixin",
+    config: "PretrainedConfig",
+    tokenizer: "PreTrainedTokenizer",
+    model_args: "ModelArguments",
+) -> None:
+    setattr(processor, "tokenizer", tokenizer)
+    setattr(processor, "image_seqlen", get_image_seqlen(config))
+    setattr(processor, "image_resolution", model_args.image_resolution)
+    setattr(processor, "patch_size", get_patch_size(config))
+    setattr(processor, "video_resolution", model_args.video_resolution)
+    setattr(processor, "video_fps", model_args.video_fps)
+    setattr(processor, "video_maxlen", model_args.video_maxlen)
+    setattr(processor, "vision_feature_select_strategy", get_vision_feature_select_strategy(config))
+
+
 def patch_config(
     config: "PretrainedConfig",
     tokenizer: "PreTrainedTokenizer",
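`patch_processor` centralizes the attributes that the multimodal plugins later read off the processor, instead of scattering the `setattr` calls through `load_tokenizer`. A toy sketch of why that matters, with a `SimpleNamespace` standing in for a real `ProcessorMixin`; the token expansion mirrors what the plugin tests later in this diff expect:

```python
# Toy sketch: the processor carries the multimodal settings, so downstream
# code needs only the processor object (values here are illustrative).
from types import SimpleNamespace

processor = SimpleNamespace()
setattr(processor, "image_seqlen", 576)  # e.g. a 24 x 24 patch grid
setattr(processor, "video_maxlen", 64)   # max frames sampled per video


def expand_image_tokens(message: str, processor) -> str:
    # Each image placeholder becomes image_seqlen copies of the token.
    return message.replace("<image>", "<image>" * processor.image_seqlen)


print(expand_image_tokens("<image> describe this picture", processor))
```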
@@ -71,7 +92,6 @@ def patch_config(

     configure_attn_implementation(config, model_args, is_trainable)
     configure_rope(config, model_args, is_trainable)
-    configure_liger_kernel(config, model_args, is_trainable)
     configure_longlora(config, model_args, is_trainable)
     configure_quantization(config, tokenizer, model_args, init_kwargs)
     configure_moe(config, model_args, is_trainable)
|
|||||||
if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn == "fa2":
|
if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn == "fa2":
|
||||||
setattr(config, "use_cache", False) # qwen2 does not support use_cache when using flash attn
|
setattr(config, "use_cache", False) # qwen2 does not support use_cache when using flash attn
|
||||||
|
|
||||||
|
if "LlavaLlamaForCausalLM" in getattr(config, "architectures", []):
|
||||||
|
raise ValueError("Please download llava models with hf-compatible format: https://huggingface.co/llava-hf")
|
||||||
|
|
||||||
# deepspeed zero3 is not compatible with low_cpu_mem_usage
|
# deepspeed zero3 is not compatible with low_cpu_mem_usage
|
||||||
init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())
|
init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())
|
||||||
|
|
||||||
|
@@ -115,13 +115,6 @@ def get_model_path(model_name: str) -> str:
     return model_path


-def get_prefix(model_name: str) -> str:
-    r"""
-    Gets the prefix of the model name to obtain the model family.
-    """
-    return model_name.split("-")[0]
-
-
 def get_model_info(model_name: str) -> Tuple[str, str]:
     r"""
     Gets the necessary information of this model.
@@ -137,21 +130,14 @@ def get_template(model_name: str) -> str:
     r"""
     Gets the template name if the model is a chat model.
     """
-    if (
-        model_name
-        and any(suffix in model_name for suffix in ("-Chat", "-Instruct"))
-        and get_prefix(model_name) in DEFAULT_TEMPLATE
-    ):
-        return DEFAULT_TEMPLATE[get_prefix(model_name)]
-
-    return "default"
+    return DEFAULT_TEMPLATE.get(model_name, "default")


 def get_visual(model_name: str) -> bool:
     r"""
     Judges if the model is a vision language model.
     """
-    return get_prefix(model_name) in VISION_MODELS
+    return model_name in VISION_MODELS


 def list_checkpoints(model_name: str, finetuning_type: str) -> "gr.Dropdown":
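With `get_prefix` gone, the web UI keys `DEFAULT_TEMPLATE` and `VISION_MODELS` on full model names rather than on the family prefix, which is what makes the per-variant renames in `constants.py` above necessary. A sketch with illustrative one-entry tables (not the real webui constants):

```python
# Illustrative one-entry tables; the real ones live in the webui constants.
DEFAULT_TEMPLATE = {"Qwen2-VL-7B-Instruct": "qwen2_vl"}
VISION_MODELS = {"Qwen2-VL-7B-Instruct"}


def get_template(model_name: str) -> str:
    # Old behavior: strip to the family prefix and require a -Chat/-Instruct
    # suffix. New behavior: one direct lookup on the full display name.
    return DEFAULT_TEMPLATE.get(model_name, "default")


def get_visual(model_name: str) -> bool:
    return model_name in VISION_MODELS


assert get_template("Qwen2-VL-7B-Instruct") == "qwen2_vl"
assert get_template("Qwen2-VL-7B") == "default"  # base model falls back
assert get_visual("Qwen2-VL-7B-Instruct")
```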
@@ -136,6 +136,32 @@ def test_llava_plugin():
     _check_plugin(**check_inputs)


+def test_llava_next_plugin():
+    tokenizer, processor = _load_tokenizer_module(model_name_or_path="llava-hf/llava-v1.6-vicuna-7b-hf")
+    llava_next_plugin = get_mm_plugin(name="llava_next", image_token="<image>")
+    check_inputs = {"plugin": llava_next_plugin, "tokenizer": tokenizer, "processor": processor}
+    image_seqlen = 1176
+    check_inputs["expected_mm_messages"] = [
+        {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
+        for message in MM_MESSAGES
+    ]
+    check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
+    _check_plugin(**check_inputs)
+
+
+def test_llava_next_video_plugin():
+    tokenizer, processor = _load_tokenizer_module(model_name_or_path="llava-hf/LLaVA-NeXT-Video-7B-hf")
+    llava_next_video_plugin = get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>")
+    check_inputs = {"plugin": llava_next_video_plugin, "tokenizer": tokenizer, "processor": processor}
+    image_seqlen = 1176
+    check_inputs["expected_mm_messages"] = [
+        {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
+        for message in MM_MESSAGES
+    ]
+    check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
+    _check_plugin(**check_inputs)
+
+
 @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
 def test_paligemma_plugin():
     tokenizer, processor = _load_tokenizer_module(model_name_or_path="google/paligemma-3b-pt-224")
@@ -167,3 +193,16 @@ def test_qwen2_vl_plugin():
     ]
     check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
     _check_plugin(**check_inputs)
+
+
+def test_video_llava_plugin():
+    tokenizer, processor = _load_tokenizer_module(model_name_or_path="LanguageBind/Video-LLaVA-7B-hf")
+    video_llava_plugin = get_mm_plugin(name="video_llava", image_token="<image>", video_token="<video>")
+    check_inputs = {"plugin": video_llava_plugin, "tokenizer": tokenizer, "processor": processor}
+    image_seqlen = 256
+    check_inputs["expected_mm_messages"] = [
+        {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
+        for message in MM_MESSAGES
+    ]
+    check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
+    _check_plugin(**check_inputs)