diff --git a/.env.local b/.env.local
index a58ed445..363317e1 100644
--- a/.env.local
+++ b/.env.local
@@ -8,7 +8,6 @@ FASTAPI_ROOT_PATH=
 # general
 DISABLE_VERSION_CHECK=
 FORCE_CHECK_IMPORTS=
-FORCE_TORCHRUN=
 LLAMAFACTORY_VERBOSITY=
 USE_MODELSCOPE_HUB=
 RECORD_VRAM=
diff --git a/README.md b/README.md
index 51fbc7c1..de88d8dd 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ https://github.com/user-attachments/assets/7c96b465-9df7-45f4-8053-bf03e58386d3
 Choose your path:
 
 - **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
-- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
+- **PAI-DSW**: [Llama3 Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) | [Qwen2-VL Example](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl)
 - **Local machine**: Please refer to [usage](#getting-started)
 - **Documentation (WIP)**: https://llamafactory.readthedocs.io/zh-cn/latest/
 
@@ -72,6 +72,8 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 ## Changelog
 
+[24/09/19] We support fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** models.
+
 [24/08/30] We support fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR.
 
 [24/08/27] We support **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training.
@@ -173,7 +175,7 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 | [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
 | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3/Llama 3.1](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
+| [Llama 3/Llama 3.1/Llama 3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
 | [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
 | [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/13B | llava_next |
 | [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/13B | llava_next_video |
@@ -183,8 +185,8 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 | [PaliGemma](https://huggingface.co/google) | 3B | paligemma |
 | [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
 | [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
-| [Qwen/Qwen1.5/Qwen2 (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/4B/7B/14B/32B/72B/110B | qwen |
-| [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B | qwen2_vl |
+| [Qwen (1-2.5) (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
+| [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B/72B | qwen2_vl |
 | [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
 | [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
 | [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
diff --git a/README_zh.md b/README_zh.md
index 251b1f87..6e64d855 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -26,7 +26,7 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 选择你的打开方式:
 
 - **Colab**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
-- **PAI-DSW**:https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
+- **PAI-DSW**:[Llama3 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) | [Qwen2-VL 案例](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl)
 - **本地机器**:请见[如何使用](#如何使用)
 - **入门教程**:https://zhuanlan.zhihu.com/p/695287607
 - **框架文档**:https://llamafactory.readthedocs.io/zh-cn/latest/
@@ -73,6 +73,8 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 ## 更新日志
 
+[24/09/19] 我们支持了 **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** 模型的微调。
+
 [24/08/30] 我们支持了 **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** 模型的微调。感谢 [@simonJJJ](https://github.com/simonJJJ) 的 PR。
 
 [24/08/27] 我们支持了 **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**。请使用 `enable_liger_kernel: true` 来加速训练。
@@ -174,7 +176,7 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 | [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
 | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3/Llama 3.1](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
+| [Llama 3/Llama 3.1/Llama 3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
 | [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
 | [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/13B | llava_next |
 | [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/13B | llava_next_video |
@@ -184,8 +186,8 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 | [PaliGemma](https://huggingface.co/google) | 3B | paligemma |
 | [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
 | [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
-| [Qwen/Qwen1.5/Qwen2 (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/4B/7B/14B/32B/72B/110B | qwen |
-| [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B | qwen2_vl |
+| [Qwen (1-2.5) (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
+| [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B/72B | qwen2_vl |
 | [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
 | [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
 | [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
diff --git a/assets/wechat.jpg b/assets/wechat.jpg
index 29ce1691..f2d57406 100644
Binary files a/assets/wechat.jpg and b/assets/wechat.jpg differ
diff --git a/assets/wechat_npu.jpg b/assets/wechat_npu.jpg
index 7387e556..7708e35a 100644
Binary files a/assets/wechat_npu.jpg and b/assets/wechat_npu.jpg differ
diff --git a/docker/docker-cuda/Dockerfile b/docker/docker-cuda/Dockerfile
index a43baf21..d03ece88 100644
--- a/docker/docker-cuda/Dockerfile
+++ b/docker/docker-cuda/Dockerfile
@@ -12,6 +12,9 @@ ARG INSTALL_BNB=false
 ARG INSTALL_VLLM=false
 ARG INSTALL_DEEPSPEED=false
 ARG INSTALL_FLASHATTN=false
+ARG INSTALL_LIGER_KERNEL=false
+ARG INSTALL_HQQ=false
+ARG INSTALL_EETQ=false
 ARG PIP_INDEX=https://pypi.org/simple
 
 # Set the working directory
@@ -38,6 +41,15 @@ RUN EXTRA_PACKAGES="metrics"; \
     if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
         EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
     fi; \
+    if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \
+        EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
+    fi; \
+    if [ "$INSTALL_HQQ" == "true" ]; then \
+        EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
+    fi; \
+    if [ "$INSTALL_EETQ" == "true" ]; then \
+        EXTRA_PACKAGES="${EXTRA_PACKAGES},eetq"; \
+    fi; \
     pip install -e ".[$EXTRA_PACKAGES]"
 
 # Rebuild flash attention
diff --git a/docker/docker-cuda/docker-compose.yml b/docker/docker-cuda/docker-compose.yml
index 16267dc3..7af06b74 100644
--- a/docker/docker-cuda/docker-compose.yml
+++ b/docker/docker-cuda/docker-compose.yml
@@ -8,6 +8,9 @@ services:
         INSTALL_VLLM: false
         INSTALL_DEEPSPEED: false
         INSTALL_FLASHATTN: false
+        INSTALL_LIGER_KERNEL: false
+        INSTALL_HQQ: false
+        INSTALL_EETQ: false
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
diff --git a/docker/docker-rocm/Dockerfile b/docker/docker-rocm/Dockerfile
index a0c42d3b..62bd78f5 100644
--- a/docker/docker-rocm/Dockerfile
+++ b/docker/docker-rocm/Dockerfile
@@ -1,4 +1,4 @@
-FROM hardandheavy/transformers-rocm:2.1.0
+FROM hardandheavy/transformers-rocm:2.2.0
 
 # Define environments
 ENV MAX_JOBS=4
@@ -10,6 +10,8 @@ ARG INSTALL_BNB=false
 ARG INSTALL_VLLM=false
 ARG INSTALL_DEEPSPEED=false
 ARG INSTALL_FLASHATTN=false
+ARG INSTALL_LIGER_KERNEL=false
+ARG INSTALL_HQQ=false
 ARG PIP_INDEX=https://pypi.org/simple
 
 # Set the working directory
@@ -36,6 +38,12 @@ RUN EXTRA_PACKAGES="metrics"; \
     if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
         EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
     fi; \
+    if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \
+        EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
+    fi; \
+    if [ "$INSTALL_HQQ" == "true" ]; then \
+        EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
+    fi; \
     pip install -e ".[$EXTRA_PACKAGES]"
 
 # Rebuild flash attention
diff --git a/docker/docker-rocm/docker-compose.yml b/docker/docker-rocm/docker-compose.yml
index 923bd067..2a4ea960 100644
--- a/docker/docker-rocm/docker-compose.yml
+++ b/docker/docker-rocm/docker-compose.yml
@@ -8,6 +8,8 @@ services:
         INSTALL_VLLM: false
         INSTALL_DEEPSPEED: false
         INSTALL_FLASHATTN: false
+        INSTALL_LIGER_KERNEL: false
+        INSTALL_HQQ: false
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
diff --git a/src/llamafactory/api/chat.py b/src/llamafactory/api/chat.py
index 34cf132a..d0a41515 100644
--- a/src/llamafactory/api/chat.py
+++ b/src/llamafactory/api/chat.py
@@ -229,8 +229,9 @@ async def create_stream_chat_completion_response(
 async def create_score_evaluation_response(
     request: "ScoreEvaluationRequest", chat_model: "ChatModel"
 ) -> "ScoreEvaluationResponse":
+    score_id = "scoreval-{}".format(uuid.uuid4().hex)
     if len(request.messages) == 0:
         raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request")
 
     scores = await chat_model.aget_scores(request.messages, max_length=request.max_length)
-    return ScoreEvaluationResponse(model=request.model, scores=scores)
+    return ScoreEvaluationResponse(id=score_id, model=request.model, scores=scores)
diff --git a/src/llamafactory/chat/hf_engine.py b/src/llamafactory/chat/hf_engine.py
index 8819dc79..2b1d9fe5 100644
--- a/src/llamafactory/chat/hf_engine.py
+++ b/src/llamafactory/chat/hf_engine.py
@@ -246,29 +246,18 @@ class HuggingfaceEngine(BaseEngine):
         batch_input: List[str],
         input_kwargs: Optional[Dict[str, Any]] = {},
     ) -> List[float]:
-        max_length = input_kwargs.pop("max_length", None)
+        max_length: Optional[int] = input_kwargs.pop("max_length", None)
         device = getattr(model.pretrained_model, "device", "cuda")
-        inputs = tokenizer(
+        inputs: Dict[str, "torch.Tensor"] = tokenizer(
             batch_input,
             padding=True,
             truncation=True,
             max_length=max_length or getattr(model.config, "max_position_embeddings", 1024),
             return_tensors="pt",
-            add_special_tokens=True,
+            add_special_tokens=False,
         ).to(device)
-
-        input_ids: torch.Tensor = inputs["input_ids"]
-        _, _, values = model(**inputs, output_hidden_states=True, return_dict=True)
-
-        if getattr(model.config, "model_type", None) == "chatglm":
-            values = torch.transpose(values, 0, 1)
-
-        scores = []
-        for i in range(input_ids.size(0)):
-            end_indexes = (input_ids[i] != tokenizer.pad_token_id).nonzero()
-            end_index = end_indexes[-1].item() if len(end_indexes) else 0
-            scores.append(values[i, end_index].nan_to_num().item())
-
+        values: "torch.Tensor" = model(**inputs, return_dict=True, use_cache=False)[-1]
+        scores = values.gather(dim=-1, index=(inputs["attention_mask"].sum(dim=-1, keepdim=True) - 1))
         return scores
 
     @override
diff --git a/src/llamafactory/data/formatter.py b/src/llamafactory/data/formatter.py
index f8b3979a..99a4a8b4 100644
--- a/src/llamafactory/data/formatter.py
+++ b/src/llamafactory/data/formatter.py
@@ -113,7 +113,7 @@ class FunctionFormatter(Formatter):
                 functions.append((tool_call["name"], json.dumps(tool_call["arguments"], ensure_ascii=False)))
 
         except json.JSONDecodeError:
-            functions = []
+            raise RuntimeError("Invalid JSON format in function message: {}".format(str([content])))  # flat string
 
         elements = []
         for name, arguments in functions:
@@ -141,7 +141,7 @@ class ToolFormatter(Formatter):
             tools = json.loads(content)
             return [self.tool_utils.tool_formatter(tools) if len(tools) != 0 else ""]
         except json.JSONDecodeError:
-            return [""]
+            raise RuntimeError("Invalid JSON format in tool description: {}".format(str([content])))  # flat string
 
     @override
     def extract(self, content: str) -> Union[str, List["FunctionCall"]]:
diff --git a/src/llamafactory/data/template.py b/src/llamafactory/data/template.py
index 75e3d340..99fca395 100644
--- a/src/llamafactory/data/template.py
+++ b/src/llamafactory/data/template.py
@@ -49,6 +49,7 @@ class Template:
     stop_words: List[str]
     efficient_eos: bool
     replace_eos: bool
+    replace_jinja_template: bool
     mm_plugin: "BasePlugin"
 
     def encode_oneturn(
@@ -214,6 +215,7 @@ def _register_template(
     stop_words: Sequence[str] = [],
    efficient_eos: bool = False,
     replace_eos: bool = False,
+    replace_jinja_template: bool = True,
     mm_plugin: "BasePlugin" = get_mm_plugin(name="base"),
 ) -> None:
     r"""
@@ -263,6 +265,7 @@ def _register_template(
         stop_words=stop_words,
         efficient_eos=efficient_eos,
         replace_eos=replace_eos,
+        replace_jinja_template=replace_jinja_template,
         mm_plugin=mm_plugin,
     )
 
@@ -398,10 +401,11 @@ def get_template_and_fix_tokenizer(tokenizer: "PreTrainedTokenizer", data_args:
     if num_added_tokens > 0:
         logger.warning("New tokens have been added, make sure `resize_vocab` is True.")
 
-    try:
-        tokenizer.chat_template = _get_jinja_template(template, tokenizer)
-    except ValueError:
-        logger.info("Cannot add this chat template to tokenizer.")
+    if template.replace_jinja_template:
+        try:
+            tokenizer.chat_template = _get_jinja_template(template, tokenizer)
+        except ValueError:
+            logger.info("Cannot add this chat template to tokenizer.")
 
     return template
 
@@ -664,6 +668,7 @@ _register_template(
     format_separator=EmptyFormatter(slots=["\n"]),
     format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
     efficient_eos=True,
+    replace_jinja_template=False,
 )
 
 
@@ -750,6 +755,7 @@ _register_template(
     format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
     stop_words=["<|eot_id|>"],
     replace_eos=True,
+    replace_jinja_template=False,
 )
 
 
@@ -863,6 +869,7 @@ _register_template(
     default_system="You are a helpful assistant.",
     stop_words=["<|im_end|>"],
     replace_eos=True,
+    replace_jinja_template=False,
 )
 
 
@@ -875,6 +882,7 @@ _register_template(
     default_system="You are a helpful assistant.",
stop_words=["<|im_end|>"], replace_eos=True, + replace_jinja_template=False, mm_plugin=get_mm_plugin(name="qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"), ) diff --git a/src/llamafactory/extras/constants.py b/src/llamafactory/extras/constants.py index 6194559e..a3667249 100644 --- a/src/llamafactory/extras/constants.py +++ b/src/llamafactory/extras/constants.py @@ -238,7 +238,7 @@ register_model_group( "Breeze-7B": { DownloadSource.DEFAULT: "MediaTek-Research/Breeze-7B-Base-v1_0", }, - "Breeze-7B-Chat": { + "Breeze-7B-Instruct": { DownloadSource.DEFAULT: "MediaTek-Research/Breeze-7B-Instruct-v1_0", }, }, @@ -319,14 +319,14 @@ register_model_group( "CodeGemma-7B": { DownloadSource.DEFAULT: "google/codegemma-7b", }, - "CodeGemma-7B-Chat": { + "CodeGemma-7B-Instruct": { DownloadSource.DEFAULT: "google/codegemma-7b-it", DownloadSource.MODELSCOPE: "AI-ModelScope/codegemma-7b-it", }, "CodeGemma-1.1-2B": { DownloadSource.DEFAULT: "google/codegemma-1.1-2b", }, - "CodeGemma-1.1-7B-Chat": { + "CodeGemma-1.1-7B-Instruct": { DownloadSource.DEFAULT: "google/codegemma-1.1-7b-it", }, }, @@ -372,7 +372,7 @@ register_model_group( DownloadSource.DEFAULT: "databricks/dbrx-base", DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-base", }, - "DBRX-132B-Chat": { + "DBRX-132B-Instruct": { DownloadSource.DEFAULT: "databricks/dbrx-instruct", DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-instruct", }, @@ -403,7 +403,7 @@ register_model_group( DownloadSource.DEFAULT: "deepseek-ai/deepseek-math-7b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-math-7b-base", }, - "DeepSeek-Math-7B-Chat": { + "DeepSeek-Math-7B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-math-7b-instruct", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-math-7b-instruct", }, @@ -411,36 +411,36 @@ register_model_group( DownloadSource.DEFAULT: "deepseek-ai/deepseek-moe-16b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-moe-16b-base", }, - "DeepSeek-MoE-16B-v2-Base": { - DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite", - DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite", - }, - "DeepSeek-MoE-236B-Base": { - DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2", - DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2", - }, "DeepSeek-MoE-16B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-moe-16b-chat", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-moe-16b-chat", }, - "DeepSeek-MoE-16B-v2-Chat": { + "DeepSeek-V2-16B-Base": { + DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite", + DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite", + }, + "DeepSeek-V2-236B-Base": { + DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2", + DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2", + }, + "DeepSeek-V2-16B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite-Chat", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite-Chat", }, - "DeepSeek-MoE-236B-Chat": { + "DeepSeek-V2-236B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Chat", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Chat", }, - "DeepSeek-MoE-Coder-16B-Base": { + "DeepSeek-Coder-V2-16B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Lite-Base", }, - "DeepSeek-MoE-Coder-236B-Base": { + "DeepSeek-Coder-V2-236B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Base", }, - "DeepSeek-MoE-Coder-16B-Chat": { + "DeepSeek-Coder-V2-16B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", }, - "DeepSeek-MoE-Coder-236B-Chat": { + 
"DeepSeek-Coder-V2-236B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Instruct", }, }, @@ -461,14 +461,14 @@ register_model_group( DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-base", }, - "DeepSeekCoder-6.7B-Chat": { + "DeepSeekCoder-6.7B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-instruct", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-instruct", }, - "DeepSeekCoder-7B-Chat": { + "DeepSeekCoder-7B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-instruct-v1.5", }, - "DeepSeekCoder-33B-Chat": { + "DeepSeekCoder-33B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-instruct", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-instruct", }, @@ -494,11 +494,11 @@ register_model_group( DownloadSource.DEFAULT: "tiiuae/falcon-180b", DownloadSource.MODELSCOPE: "modelscope/falcon-180B", }, - "Falcon-7B-Chat": { + "Falcon-7B-Instruct": { DownloadSource.DEFAULT: "tiiuae/falcon-7b-instruct", DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-7b-instruct", }, - "Falcon-40B-Chat": { + "Falcon-40B-Instruct": { DownloadSource.DEFAULT: "tiiuae/falcon-40b-instruct", DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-40b-instruct", }, @@ -521,18 +521,18 @@ register_model_group( DownloadSource.DEFAULT: "google/gemma-7b", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-2b-it", }, - "Gemma-2B-Chat": { + "Gemma-2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2b-it", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-7b", }, - "Gemma-7B-Chat": { + "Gemma-7B-Instruct": { DownloadSource.DEFAULT: "google/gemma-7b-it", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-7b-it", }, - "Gemma-1.1-2B-Chat": { + "Gemma-1.1-2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-1.1-2b-it", }, - "Gemma-1.1-7B-Chat": { + "Gemma-1.1-7B-Instruct": { DownloadSource.DEFAULT: "google/gemma-1.1-7b-it", }, "Gemma-2-2B": { @@ -547,15 +547,15 @@ register_model_group( DownloadSource.DEFAULT: "google/gemma-2-27b", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-27b", }, - "Gemma-2-2B-Chat": { + "Gemma-2-2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2-2b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-2b-it", }, - "Gemma-2-9B-Chat": { + "Gemma-2-9B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2-9b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-9b-it", }, - "Gemma-2-27B-Chat": { + "Gemma-2-27B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2-27b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-27b-it", }, @@ -764,11 +764,11 @@ register_model_group( DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-70B", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-70B", }, - "LLaMA3-8B-Chat": { + "LLaMA3-8B-Instruct": { DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-8B-Instruct", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-8B-Instruct", }, - "LLaMA3-70B-Chat": { + "LLaMA3-70B-Instruct": { DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-70B-Instruct", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-70B-Instruct", }, @@ -798,15 +798,15 @@ register_model_group( DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-405B", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-405B", }, - "LLaMA3.1-8B-Chat": { + "LLaMA3.1-8B-Instruct": { DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-8B-Instruct", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-8B-Instruct", }, - "LLaMA3.1-70B-Chat": { + 
"LLaMA3.1-70B-Instruct": { DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-70B-Instruct", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-70B-Instruct", }, - "LLaMA3.1-405B-Chat": { + "LLaMA3.1-405B-Instruct": { DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3.1-405B-Instruct", DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3.1-405B-Instruct", }, @@ -815,6 +815,29 @@ register_model_group( ) +register_model_group( + models={ + "LLaMA3.2-1B": { + DownloadSource.DEFAULT: "meta-llama/Llama-3.2-1B", + DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-1B", + }, + "LLaMA3.2-3B": { + DownloadSource.DEFAULT: "meta-llama/Llama-3.2-3B", + DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-3B", + }, + "LLaMA3.2-1B-Instruct": { + DownloadSource.DEFAULT: "meta-llama/Llama-3.2-1B-Instruct", + DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-1B-Instruct", + }, + "LLaMA3.2-3B-Instruct": { + DownloadSource.DEFAULT: "meta-llama/Llama-3.2-3B-Instruct", + DownloadSource.MODELSCOPE: "LLM-Research/Llama-3.2-3B-Instruct", + }, + }, + template="llama3", +) + + register_model_group( models={ "LLaVA1.5-7B-Chat": { @@ -904,7 +927,7 @@ register_model_group( DownloadSource.DEFAULT: "mistralai/Mistral-7B-v0.1", DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-v0.1", }, - "Mistral-7B-v0.1-Chat": { + "Mistral-7B-Instruct-v0.1": { DownloadSource.DEFAULT: "mistralai/Mistral-7B-Instruct-v0.1", DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-Instruct-v0.1", }, @@ -912,18 +935,18 @@ register_model_group( DownloadSource.DEFAULT: "alpindale/Mistral-7B-v0.2-hf", DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-v0.2-hf", }, - "Mistral-7B-v0.2-Chat": { + "Mistral-7B-Instruct-v0.2": { DownloadSource.DEFAULT: "mistralai/Mistral-7B-Instruct-v0.2", DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-Instruct-v0.2", }, "Mistral-7B-v0.3": { DownloadSource.DEFAULT: "mistralai/Mistral-7B-v0.3", }, - "Mistral-7B-v0.3-Chat": { + "Mistral-7B-Instruct-v0.3": { DownloadSource.DEFAULT: "mistralai/Mistral-7B-Instruct-v0.3", DownloadSource.MODELSCOPE: "LLM-Research/Mistral-7B-Instruct-v0.3", }, - "Mistral-Nemo-Chat": { + "Mistral-Nemo-Instruct-2407": { DownloadSource.DEFAULT: "mistralai/Mistral-Nemo-Instruct-2407", DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-Nemo-Instruct-2407", }, @@ -938,7 +961,7 @@ register_model_group( DownloadSource.DEFAULT: "mistralai/Mixtral-8x7B-v0.1", DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x7B-v0.1", }, - "Mixtral-8x7B-v0.1-Chat": { + "Mixtral-8x7B-v0.1-Instruct": { DownloadSource.DEFAULT: "mistralai/Mixtral-8x7B-Instruct-v0.1", DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x7B-Instruct-v0.1", }, @@ -946,7 +969,7 @@ register_model_group( DownloadSource.DEFAULT: "mistralai/Mixtral-8x22B-v0.1", DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x22B-v0.1", }, - "Mixtral-8x22B-v0.1-Chat": { + "Mixtral-8x22B-v0.1-Instruct": { DownloadSource.DEFAULT: "mistralai/Mixtral-8x22B-Instruct-v0.1", DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x22B-Instruct-v0.1", }, @@ -1065,27 +1088,27 @@ register_model_group( register_model_group( models={ - "Phi3-4B-4k-Chat": { + "Phi3-4B-4k-Instruct": { DownloadSource.DEFAULT: "microsoft/Phi-3-mini-4k-instruct", DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-mini-4k-instruct", }, - "Phi3-4B-128k-Chat": { + "Phi3-4B-128k-Instruct": { DownloadSource.DEFAULT: "microsoft/Phi-3-mini-128k-instruct", DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-mini-128k-instruct", }, - "Phi3-7B-8k-Chat": { + "Phi3-7B-8k-Instruct": { DownloadSource.DEFAULT: 
"microsoft/Phi-3-small-8k-instruct", DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-small-8k-instruct", }, - "Phi3-7B-128k-Chat": { + "Phi3-7B-128k-Instruct": { DownloadSource.DEFAULT: "microsoft/Phi-3-small-128k-instruct", DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-small-128k-instruct", }, - "Phi3-14B-8k-Chat": { + "Phi3-14B-8k-Instruct": { DownloadSource.DEFAULT: "microsoft/Phi-3-medium-4k-instruct", DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-4k-instruct", }, - "Phi3-14B-128k-Chat": { + "Phi3-14B-128k-Instruct": { DownloadSource.DEFAULT: "microsoft/Phi-3-medium-128k-instruct", DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-128k-instruct", }, @@ -1203,10 +1226,6 @@ register_model_group( DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B", DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B", }, - "Qwen1.5-Code-7B": { - DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B", - DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B", - }, "Qwen1.5-0.5B-Chat": { DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat", DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat", @@ -1243,10 +1262,6 @@ register_model_group( DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B-Chat", DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B-Chat", }, - "Qwen1.5-Code-7B-Chat": { - DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat", - DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat", - }, "Qwen1.5-0.5B-int8-Chat": { DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8", DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8", @@ -1307,6 +1322,14 @@ register_model_group( DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4", DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4", }, + "Qwen1.5-Code-7B": { + DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B", + DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B", + }, + "Qwen1.5-Code-7B-Chat": { + DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat", + DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat", + }, "Qwen1.5-Code-7B-int4-Chat": { DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat-AWQ", DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat-AWQ", @@ -1334,10 +1357,82 @@ register_model_group( DownloadSource.DEFAULT: "Qwen/Qwen2-72B", DownloadSource.MODELSCOPE: "qwen/Qwen2-72B", }, - "Qwen2-MoE-57B": { + "Qwen2-MoE-57B-A14B": { DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B", DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B", }, + "Qwen2-0.5B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct", + }, + "Qwen2-1.5B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct", + }, + "Qwen2-7B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct", + }, + "Qwen2-72B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct", + }, + "Qwen2-MoE-57B-A14B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B-Instruct", + }, + "Qwen2-0.5B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct-GPTQ-Int8", + }, + "Qwen2-0.5B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct-GPTQ-Int4", + }, + "Qwen2-0.5B-Instruct-AWQ": { + DownloadSource.DEFAULT: 
"Qwen/Qwen2-0.5B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct-AWQ", + }, + "Qwen2-1.5B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-GPTQ-Int8", + }, + "Qwen2-1.5B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-GPTQ-Int4", + }, + "Qwen2-1.5B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-AWQ", + }, + "Qwen2-7B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-GPTQ-Int8", + }, + "Qwen2-7B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-GPTQ-Int4", + }, + "Qwen2-7B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-AWQ", + }, + "Qwen2-72B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-GPTQ-Int8", + }, + "Qwen2-72B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-GPTQ-Int4", + }, + "Qwen2-72B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-AWQ", + }, + "Qwen2-57B-A14B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B-Instruct-GPTQ-Int4", + }, "Qwen2-Math-1.5B": { DownloadSource.DEFAULT: "Qwen/Qwen2-Math-1.5B", DownloadSource.MODELSCOPE: "qwen/Qwen2-Math-1.5B", @@ -1350,74 +1445,18 @@ register_model_group( DownloadSource.DEFAULT: "Qwen/Qwen2-Math-72B", DownloadSource.MODELSCOPE: "qwen/Qwen2-Math-72B", }, - "Qwen2-0.5B-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct", - DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct", - }, - "Qwen2-1.5B-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct", - DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct", - }, - "Qwen2-7B-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct", - DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct", - }, - "Qwen2-72B-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct", - DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct", - }, - "Qwen2-MoE-57B-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B-Instruct", - DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B-Instruct", - }, - "Qwen2-Math-1.5B-Chat": { + "Qwen2-Math-1.5B-Instruct": { DownloadSource.DEFAULT: "Qwen/Qwen2-Math-1.5B-Instruct", DownloadSource.MODELSCOPE: "qwen/Qwen2-Math-1.5B-Instruct", }, - "Qwen2-Math-7B-Chat": { + "Qwen2-Math-7B-Instruct": { DownloadSource.DEFAULT: "Qwen/Qwen2-Math-7B-Instruct", DownloadSource.MODELSCOPE: "qwen/Qwen2-Math-7B-Instruct", }, - "Qwen2-Math-72B-Chat": { + "Qwen2-Math-72B-Instruct": { DownloadSource.DEFAULT: "Qwen/Qwen2-Math-72B-Instruct", DownloadSource.MODELSCOPE: "qwen/Qwen2-Math-72B-Instruct", }, - "Qwen2-0.5B-int8-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct-GPTQ-Int8", - DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct-GPTQ-Int8", - }, - "Qwen2-0.5B-int4-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct-AWQ", - DownloadSource.MODELSCOPE: 
"qwen/Qwen2-0.5B-Instruct-AWQ", - }, - "Qwen2-1.5B-int8-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-GPTQ-Int8", - DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-GPTQ-Int8", - }, - "Qwen2-1.5B-int4-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-AWQ", - DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-AWQ", - }, - "Qwen2-7B-int8-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-GPTQ-Int8", - DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-GPTQ-Int8", - }, - "Qwen2-7B-int4-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-AWQ", - DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-AWQ", - }, - "Qwen2-72B-int8-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-GPTQ-Int8", - DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-GPTQ-Int8", - }, - "Qwen2-72B-int4-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-AWQ", - DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-AWQ", - }, - "Qwen2-MoE-57B-int4-Chat": { - DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B-Instruct-GPTQ-Int4", - DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B-Instruct-GPTQ-Int4", - }, }, template="qwen", ) @@ -1425,30 +1464,241 @@ register_model_group( register_model_group( models={ - "Qwen2VL-2B-Chat": { + "Qwen2.5-0.5B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-0.5B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-0.5B", + }, + "Qwen2.5-1.5B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-1.5B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-1.5B", + }, + "Qwen2.5-3B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-3B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-3B", + }, + "Qwen2.5-7B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-7B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-7B", + }, + "Qwen2.5-14B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-14B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-14B", + }, + "Qwen2.5-32B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-32B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-32B", + }, + "Qwen2.5-72B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-72B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-72B", + }, + "Qwen2.5-0.5B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-0.5B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-0.5B-Instruct", + }, + "Qwen2.5-1.5B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-1.5B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-1.5B-Instruct", + }, + "Qwen2.5-3B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-3B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-3B-Instruct", + }, + "Qwen2.5-7B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-7B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-7B-Instruct", + }, + "Qwen2.5-14B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-14B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-14B-Instruct", + }, + "Qwen2.5-32B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-32B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-32B-Instruct", + }, + "Qwen2.5-72B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-72B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-72B-Instruct", + }, + "Qwen2.5-0.5B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-0.5B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-0.5B-Instruct-AWQ": { + DownloadSource.DEFAULT: 
"Qwen/Qwen2.5-0.5B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-0.5B-Instruct-AWQ", + }, + "Qwen2.5-1.5B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-1.5B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-1.5B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-1.5B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-1.5B-Instruct-AWQ", + }, + "Qwen2.5-3B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-3B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-3B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-3B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-3B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-3B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-3B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-3B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-3B-Instruct-AWQ", + }, + "Qwen2.5-7B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-7B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-7B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-7B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-7B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-7B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-7B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-7B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-7B-Instruct-AWQ", + }, + "Qwen2.5-14B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-14B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-14B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-14B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-14B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-14B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-14B-Instruct-AWQ", + }, + "Qwen2.5-32B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-32B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-32B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-32B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-32B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-32B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-32B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-32B-Instruct-AWQ", + }, + "Qwen2.5-72B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-72B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-72B-Instruct-GPTQ-Int8", + }, + "Qwen2.5-72B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-72B-Instruct-GPTQ-Int4", + }, + "Qwen2.5-72B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-72B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-72B-Instruct-AWQ", + }, + "Qwen2.5-Coder-1.5B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Coder-1.5B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Coder-1.5B", + }, + "Qwen2.5-Coder-7B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Coder-7B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Coder-7B", + }, + "Qwen2.5-Coder-1.5B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Coder-1.5B-Instruct", + DownloadSource.MODELSCOPE: 
"qwen/Qwen2.5-Coder-1.5B-Instruct", + }, + "Qwen2.5-Coder-7B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Coder-7B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Coder-7B-Instruct", + }, + "Qwen2.5-Math-1.5B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Math-1.5B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Math-1.5B", + }, + "Qwen2.5-Math-7B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Math-7B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Math-7B", + }, + "Qwen2.5-Math-72B": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Math-72B", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Math-72B", + }, + "Qwen2.5-Math-1.5B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Math-1.5B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Coder-1.5B-Instruct", + }, + "Qwen2.5-Math-7B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Math-7B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Coder-7B-Instruct", + }, + "Qwen2.5-Math-72B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2.5-Math-72B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2.5-Coder-72B-Instruct", + }, + }, + template="qwen", +) + + +register_model_group( + models={ + "Qwen2VL-2B-Instruct": { DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct", DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct", }, - "Qwen2VL-7B-Chat": { + "Qwen2VL-7B-Instruct": { DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct", DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct", }, - "Qwen2VL-2B-int8-Chat": { + "Qwen2VL-72B-Instruct": { + DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct", + DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct", + }, + "Qwen2VL-2B-Instruct-GPTQ-Int8": { DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8", DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8", }, - "Qwen2VL-2B-int4-Chat": { + "Qwen2VL-2B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4", + }, + "Qwen2VL-2B-Instruct-AWQ": { DownloadSource.DEFAULT: "Qwen/Qwen2-VL-2B-Instruct-AWQ", DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-2B-Instruct-AWQ", }, - "Qwen2VL-7B-int8-Chat": { + "Qwen2VL-7B-Instruct-GPTQ-Int8": { DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8", DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8", }, - "Qwen2VL-7B-int4-Chat": { + "Qwen2VL-7B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4", + }, + "Qwen2VL-7B-Instruct-AWQ": { DownloadSource.DEFAULT: "Qwen/Qwen2-VL-7B-Instruct-AWQ", DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-7B-Instruct-AWQ", }, + "Qwen2VL-72B-Instruct-GPTQ-Int8": { + DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int8", + DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct-GPTQ-Int8", + }, + "Qwen2VL-72B-Instruct-GPTQ-Int4": { + DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct-GPTQ-Int4", + DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct-GPTQ-Int4", + }, + "Qwen2VL-72B-Instruct-AWQ": { + DownloadSource.DEFAULT: "Qwen/Qwen2-VL-72B-Instruct-AWQ", + DownloadSource.MODELSCOPE: "qwen/Qwen2-VL-72B-Instruct-AWQ", + }, }, template="qwen2_vl", vision=True, @@ -1457,10 +1707,10 @@ register_model_group( register_model_group( models={ - "SOLAR-10.7B": { + "SOLAR-10.7B-v1.0": { DownloadSource.DEFAULT: "upstage/SOLAR-10.7B-v1.0", }, - "SOLAR-10.7B-Chat": { + "SOLAR-10.7B-Instruct-v1.0": { DownloadSource.DEFAULT: "upstage/SOLAR-10.7B-Instruct-v1.0", 
DownloadSource.MODELSCOPE: "AI-ModelScope/SOLAR-10.7B-Instruct-v1.0", }, @@ -1635,23 +1885,23 @@ register_model_group( DownloadSource.DEFAULT: "xverse/XVERSE-MoE-A4.2B", DownloadSource.MODELSCOPE: "xverse/XVERSE-MoE-A4.2B", }, - "XVERSE-7B-int8-Chat": { + "XVERSE-7B-Chat-GPTQ-Int8": { DownloadSource.DEFAULT: "xverse/XVERSE-7B-Chat-GPTQ-Int8", DownloadSource.MODELSCOPE: "xverse/XVERSE-7B-Chat-GPTQ-Int8", }, - "XVERSE-7B-int4-Chat": { + "XVERSE-7B-Chat-GPTQ-Int4": { DownloadSource.DEFAULT: "xverse/XVERSE-7B-Chat-GPTQ-Int4", DownloadSource.MODELSCOPE: "xverse/XVERSE-7B-Chat-GPTQ-Int4", }, - "XVERSE-13B-int8-Chat": { + "XVERSE-13B-Chat-GPTQ-Int8": { DownloadSource.DEFAULT: "xverse/XVERSE-13B-Chat-GPTQ-Int8", DownloadSource.MODELSCOPE: "xverse/XVERSE-13B-Chat-GPTQ-Int8", }, - "XVERSE-13B-int4-Chat": { + "XVERSE-13B-Chat-GPTQ-Int4": { DownloadSource.DEFAULT: "xverse/XVERSE-13B-Chat-GPTQ-Int4", DownloadSource.MODELSCOPE: "xverse/XVERSE-13B-Chat-GPTQ-Int4", }, - "XVERSE-65B-int4-Chat": { + "XVERSE-65B-Chat-GPTQ-Int4": { DownloadSource.DEFAULT: "xverse/XVERSE-65B-Chat-GPTQ-Int4", DownloadSource.MODELSCOPE: "xverse/XVERSE-65B-Chat-GPTQ-Int4", }, diff --git a/src/llamafactory/extras/env.py b/src/llamafactory/extras/env.py index 741defea..68857c1a 100644 --- a/src/llamafactory/extras/env.py +++ b/src/llamafactory/extras/env.py @@ -26,7 +26,7 @@ import trl from transformers.utils import is_torch_cuda_available, is_torch_npu_available -VERSION = "0.9.0" +VERSION = "0.9.1.dev0" def print_env() -> None: diff --git a/src/llamafactory/model/model_utils/liger_kernel.py b/src/llamafactory/model/model_utils/liger_kernel.py index 81c1132d..9f9cd20d 100644 --- a/src/llamafactory/model/model_utils/liger_kernel.py +++ b/src/llamafactory/model/model_utils/liger_kernel.py @@ -45,6 +45,8 @@ def configure_liger_kernel(config: "PretrainedConfig", model_args: "ModelArgumen from liger_kernel.transformers import apply_liger_kernel_to_phi3 as apply_liger_kernel elif model_type == "qwen2": from liger_kernel.transformers import apply_liger_kernel_to_qwen2 as apply_liger_kernel + elif model_type == "qwen2_vl": + from liger_kernel.transformers import apply_liger_kernel_to_qwen2_vl as apply_liger_kernel else: logger.warning("Current model does not support liger kernel.") return diff --git a/src/llamafactory/train/ppo/ppo_utils.py b/src/llamafactory/train/ppo/ppo_utils.py index 05c40946..27031a45 100644 --- a/src/llamafactory/train/ppo/ppo_utils.py +++ b/src/llamafactory/train/ppo/ppo_utils.py @@ -31,7 +31,7 @@ if TYPE_CHECKING: from trl import AutoModelForCausalLMWithValueHead -def get_rewards_from_server(server_url: str, messages: List[str]) -> List[torch.Tensor]: +def get_rewards_from_server(server_url: str, messages: List[str]) -> List["torch.Tensor"]: r""" Gets reward scores from the API server. """ @@ -66,7 +66,7 @@ def replace_model(model: "AutoModelForCausalLMWithValueHead", target: Literal["d v_head_layer.bias.data = model.get_buffer("{}_head_bias".format(target)).detach().clone().to(device) -def dump_layernorm(model: "PreTrainedModel") -> Dict[str, torch.Tensor]: +def dump_layernorm(model: "PreTrainedModel") -> Dict[str, "torch.Tensor"]: r""" Dumps the layernorm parameters in the model. The model is already unwrapped (and gathered). 
""" @@ -79,7 +79,7 @@ def dump_layernorm(model: "PreTrainedModel") -> Dict[str, torch.Tensor]: return layer_norm_params -def restore_layernorm(model: "PreTrainedModel", layernorm_params: Optional[Dict[str, torch.Tensor]] = None) -> None: +def restore_layernorm(model: "PreTrainedModel", layernorm_params: Optional[Dict[str, "torch.Tensor"]] = None) -> None: r""" Restores the layernorm parameters in the model. The model is already unwrapped (and gathered). """ diff --git a/src/llamafactory/train/ppo/trainer.py b/src/llamafactory/train/ppo/trainer.py index c8d176a1..a75a9aa0 100644 --- a/src/llamafactory/train/ppo/trainer.py +++ b/src/llamafactory/train/ppo/trainer.py @@ -392,7 +392,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer): """ if self.finetuning_args.reward_model_type == "api": token_ids = [torch.cat((q, r), dim=-1).tolist() for q, r in zip(queries, responses)] - messages = self.tokenizer.batch_decode(token_ids, skip_special_tokens=True) + messages = self.tokenizer.batch_decode(token_ids, skip_special_tokens=False) return get_rewards_from_server(self.reward_model, messages) batch: Dict[str, "torch.Tensor"] = self.prepare_model_inputs(queries, responses) @@ -405,7 +405,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer): reward_model = self.reward_model with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context: # support bf16 - _, _, values = reward_model(**batch, return_dict=True, use_cache=False) + values: "torch.Tensor" = reward_model(**batch, return_dict=True, use_cache=False)[-1] if self.finetuning_args.reward_model_type == "lora": replace_model(unwrapped_model, target="default") diff --git a/src/llamafactory/webui/common.py b/src/llamafactory/webui/common.py index 019812c7..0ad2929e 100644 --- a/src/llamafactory/webui/common.py +++ b/src/llamafactory/webui/common.py @@ -137,8 +137,13 @@ def get_template(model_name: str) -> str: r""" Gets the template name if the model is a chat model. """ - if model_name and model_name.endswith("Chat") and get_prefix(model_name) in DEFAULT_TEMPLATE: + if ( + model_name + and any(suffix in model_name for suffix in ("-Chat", "-Instruct")) + and get_prefix(model_name) in DEFAULT_TEMPLATE + ): return DEFAULT_TEMPLATE[get_prefix(model_name)] + return "default" diff --git a/tests/data/test_template.py b/tests/data/test_template.py index a327df22..18d03958 100644 --- a/tests/data/test_template.py +++ b/tests/data/test_template.py @@ -19,6 +19,7 @@ import pytest from transformers import AutoTokenizer from llamafactory.data import get_template_and_fix_tokenizer +from llamafactory.data.template import _get_jinja_template from llamafactory.hparams import DataArguments @@ -117,7 +118,8 @@ def test_encode_multiturn(use_fast: bool): def test_jinja_template(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA, use_fast=use_fast) ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA, use_fast=use_fast) - get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) + template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) + tokenizer.chat_template = _get_jinja_template(template, tokenizer) # llama3 template no replace assert tokenizer.chat_template != ref_tokenizer.chat_template assert tokenizer.apply_chat_template(MESSAGES) == ref_tokenizer.apply_chat_template(MESSAGES)