Mirror of https://github.com/hiyouga/LLaMA-Factory.git
	lora modules: all by default
Former-commit-id: 52c4ae87c7f4312704c31ef26b079b2c5b95ea5f
This commit is contained in: parent abc2a73a33, commit 937f49ec3d

README.md: 56 lines changed

@@ -149,34 +149,34 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 
 ## Supported Models
 
-| Model                                                    | Model size                       | Default module    | Template  |
-| -------------------------------------------------------- | -------------------------------- | ----------------- | --------- |
-| [Baichuan2](https://huggingface.co/baichuan-inc)         | 7B/13B                           | W_pack            | baichuan2 |
-| [BLOOM](https://huggingface.co/bigscience)               | 560M/1.1B/1.7B/3B/7.1B/176B      | query_key_value   | -         |
-| [BLOOMZ](https://huggingface.co/bigscience)              | 560M/1.1B/1.7B/3B/7.1B/176B      | query_key_value   | -         |
-| [ChatGLM3](https://huggingface.co/THUDM)                 | 6B                               | query_key_value   | chatglm3  |
-| [Command-R](https://huggingface.co/CohereForAI)          | 35B/104B                         | q_proj,v_proj     | cohere    |
-| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai)     | 7B/16B/67B/236B                  | q_proj,v_proj     | deepseek  |
-| [Falcon](https://huggingface.co/tiiuae)                  | 7B/11B/40B/180B                  | query_key_value   | falcon    |
-| [Gemma/CodeGemma](https://huggingface.co/google)         | 2B/7B                            | q_proj,v_proj     | gemma     |
-| [GLM4](https://huggingface.co/THUDM)                     | 9B                               | query_key_value   | glm4      |
-| [InternLM2](https://huggingface.co/internlm)             | 7B/20B                           | wqkv              | intern2   |
-| [LLaMA](https://github.com/facebookresearch/llama)       | 7B/13B/33B/65B                   | q_proj,v_proj     | -         |
-| [LLaMA-2](https://huggingface.co/meta-llama)             | 7B/13B/70B                       | q_proj,v_proj     | llama2    |
-| [LLaMA-3](https://huggingface.co/meta-llama)             | 8B/70B                           | q_proj,v_proj     | llama3    |
-| [LLaVA-1.5](https://huggingface.co/llava-hf)             | 7B/13B                           | q_proj,v_proj     | vicuna    |
-| [Mistral/Mixtral](https://huggingface.co/mistralai)      | 7B/8x7B/8x22B                    | q_proj,v_proj     | mistral   |
-| [OLMo](https://huggingface.co/allenai)                   | 1B/7B                            | q_proj,v_proj     | -         |
-| [PaliGemma](https://huggingface.co/google)               | 3B                               | q_proj,v_proj     | gemma     |
-| [Phi-1.5/2](https://huggingface.co/microsoft)            | 1.3B/2.7B                        | q_proj,v_proj     | -         |
-| [Phi-3](https://huggingface.co/microsoft)                | 4B/7B/14B                        | qkv_proj          | phi       |
-| [Qwen](https://huggingface.co/Qwen)                      | 1.8B/7B/14B/72B                  | c_attn            | qwen      |
-| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen)        | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | q_proj,v_proj     | qwen      |
-| [StarCoder2](https://huggingface.co/bigcode)             | 3B/7B/15B                        | q_proj,v_proj     | -         |
-| [XVERSE](https://huggingface.co/xverse)                  | 7B/13B/65B                       | q_proj,v_proj     | xverse    |
-| [Yi (1/1.5)](https://huggingface.co/01-ai)               | 6B/9B/34B                        | q_proj,v_proj     | yi        |
-| [Yi-VL](https://huggingface.co/01-ai)                    | 6B/34B                           | q_proj,v_proj     | yi_vl     |
-| [Yuan](https://huggingface.co/IEITYuan)                  | 2B/51B/102B                      | q_proj,v_proj     | yuan      |
+| Model                                                    | Model size                       | Template  |
+| -------------------------------------------------------- | -------------------------------- | --------- |
+| [Baichuan2](https://huggingface.co/baichuan-inc)         | 7B/13B                           | baichuan2 |
+| [BLOOM](https://huggingface.co/bigscience)               | 560M/1.1B/1.7B/3B/7.1B/176B      | -         |
+| [BLOOMZ](https://huggingface.co/bigscience)              | 560M/1.1B/1.7B/3B/7.1B/176B      | -         |
+| [ChatGLM3](https://huggingface.co/THUDM)                 | 6B                               | chatglm3  |
+| [Command-R](https://huggingface.co/CohereForAI)          | 35B/104B                         | cohere    |
+| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai)     | 7B/16B/67B/236B                  | deepseek  |
+| [Falcon](https://huggingface.co/tiiuae)                  | 7B/11B/40B/180B                  | falcon    |
+| [Gemma/CodeGemma](https://huggingface.co/google)         | 2B/7B                            | gemma     |
+| [GLM4](https://huggingface.co/THUDM)                     | 9B                               | glm4      |
+| [InternLM2](https://huggingface.co/internlm)             | 7B/20B                           | intern2   |
+| [LLaMA](https://github.com/facebookresearch/llama)       | 7B/13B/33B/65B                   | -         |
+| [LLaMA-2](https://huggingface.co/meta-llama)             | 7B/13B/70B                       | llama2    |
+| [LLaMA-3](https://huggingface.co/meta-llama)             | 8B/70B                           | llama3    |
+| [LLaVA-1.5](https://huggingface.co/llava-hf)             | 7B/13B                           | vicuna    |
+| [Mistral/Mixtral](https://huggingface.co/mistralai)      | 7B/8x7B/8x22B                    | mistral   |
+| [OLMo](https://huggingface.co/allenai)                   | 1B/7B                            | -         |
+| [PaliGemma](https://huggingface.co/google)               | 3B                               | gemma     |
+| [Phi-1.5/2](https://huggingface.co/microsoft)            | 1.3B/2.7B                        | -         |
+| [Phi-3](https://huggingface.co/microsoft)                | 4B/7B/14B                        | phi       |
+| [Qwen](https://huggingface.co/Qwen)                      | 1.8B/7B/14B/72B                  | qwen      |
+| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen)        | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | qwen      |
+| [StarCoder2](https://huggingface.co/bigcode)             | 3B/7B/15B                        | -         |
+| [XVERSE](https://huggingface.co/xverse)                  | 7B/13B/65B                       | xverse    |
+| [Yi (1/1.5)](https://huggingface.co/01-ai)               | 6B/9B/34B                        | yi        |
+| [Yi-VL](https://huggingface.co/01-ai)                    | 6B/34B                           | yi_vl     |
+| [Yuan](https://huggingface.co/IEITYuan)                  | 2B/51B/102B                      | yuan      |
 
 > [!NOTE]
 > **Default module** is used for the `lora_target` argument, you can use `lora_target: all` to specify all the available modules for better convergence.
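
To illustrate the new convention, here is a minimal sketch of an SFT config that relies on `lora_target: all` instead of a per-model module name. The field values are taken from the example configs changed below; the helper script itself is hypothetical:

```python
# write_sft_config.py -- hypothetical helper emitting a minimal LoRA SFT config
# that follows this commit's convention of `lora_target: all`
from yaml import safe_dump  # PyYAML, the same library the webui code below imports

config = {
    "model_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "lora",
    "lora_target": "all",  # targets every linear module; no per-model table needed
    "dataset": "identity,alpaca_en_demo",
    "output_dir": "saves/llama3-8b/lora/sft",  # assumed path, adjust as needed
}

with open("llama3_lora_sft.yaml", "w") as f:
    safe_dump(config, f, sort_keys=False)
```

The resulting file can be passed to the trainer (for example, `llamafactory-cli train llama3_lora_sft.yaml`), and it should now work unchanged across the supported models since no architecture-specific module name is required.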
							
								
								
									
README_zh.md: 60 lines changed

@@ -149,41 +149,39 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
 
 ## 模型
 
-| 模型名                                                   | 模型大小                          | 默认模块           | Template  |
-| -------------------------------------------------------- | -------------------------------- | ----------------- | --------- |
-| [Baichuan2](https://huggingface.co/baichuan-inc)         | 7B/13B                           | W_pack            | baichuan2 |
-| [BLOOM](https://huggingface.co/bigscience)               | 560M/1.1B/1.7B/3B/7.1B/176B      | query_key_value   | -         |
-| [BLOOMZ](https://huggingface.co/bigscience)              | 560M/1.1B/1.7B/3B/7.1B/176B      | query_key_value   | -         |
-| [ChatGLM3](https://huggingface.co/THUDM)                 | 6B                               | query_key_value   | chatglm3  |
-| [Command-R](https://huggingface.co/CohereForAI)          | 35B/104B                         | q_proj,v_proj     | cohere    |
-| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai)     | 7B/16B/67B/236B                  | q_proj,v_proj     | deepseek  |
-| [Falcon](https://huggingface.co/tiiuae)                  | 7B/11B/40B/180B                  | query_key_value   | falcon    |
-| [Gemma/CodeGemma](https://huggingface.co/google)         | 2B/7B                            | q_proj,v_proj     | gemma     |
-| [GLM4](https://huggingface.co/THUDM)                     | 9B                               | query_key_value   | glm4      |
-| [InternLM2](https://huggingface.co/internlm)             | 7B/20B                           | wqkv              | intern2   |
-| [LLaMA](https://github.com/facebookresearch/llama)       | 7B/13B/33B/65B                   | q_proj,v_proj     | -         |
-| [LLaMA-2](https://huggingface.co/meta-llama)             | 7B/13B/70B                       | q_proj,v_proj     | llama2    |
-| [LLaMA-3](https://huggingface.co/meta-llama)             | 8B/70B                           | q_proj,v_proj     | llama3    |
-| [LLaVA-1.5](https://huggingface.co/llava-hf)             | 7B/13B                           | q_proj,v_proj     | vicuna    |
-| [Mistral/Mixtral](https://huggingface.co/mistralai)      | 7B/8x7B/8x22B                    | q_proj,v_proj     | mistral   |
-| [OLMo](https://huggingface.co/allenai)                   | 1B/7B                            | q_proj,v_proj     | -         |
-| [PaliGemma](https://huggingface.co/google)               | 3B                               | q_proj,v_proj     | gemma     |
-| [Phi-1.5/2](https://huggingface.co/microsoft)            | 1.3B/2.7B                        | q_proj,v_proj     | -         |
-| [Phi-3](https://huggingface.co/microsoft)                | 4B/7B/14B                        | qkv_proj          | phi       |
-| [Qwen](https://huggingface.co/Qwen)                      | 1.8B/7B/14B/72B                  | c_attn            | qwen      |
-| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen)        | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | q_proj,v_proj     | qwen      |
-| [StarCoder2](https://huggingface.co/bigcode)             | 3B/7B/15B                        | q_proj,v_proj     | -         |
-| [XVERSE](https://huggingface.co/xverse)                  | 7B/13B/65B                       | q_proj,v_proj     | xverse    |
-| [Yi (1/1.5)](https://huggingface.co/01-ai)               | 6B/9B/34B                        | q_proj,v_proj     | yi        |
-| [Yi-VL](https://huggingface.co/01-ai)                    | 6B/34B                           | q_proj,v_proj     | yi_vl     |
-| [Yuan](https://huggingface.co/IEITYuan)                  | 2B/51B/102B                      | q_proj,v_proj     | yuan      |
+| 模型名                                                   | 模型大小                          | Template  |
+| -------------------------------------------------------- | -------------------------------- | --------- |
+| [Baichuan2](https://huggingface.co/baichuan-inc)         | 7B/13B                           | baichuan2 |
+| [BLOOM](https://huggingface.co/bigscience)               | 560M/1.1B/1.7B/3B/7.1B/176B      | -         |
+| [BLOOMZ](https://huggingface.co/bigscience)              | 560M/1.1B/1.7B/3B/7.1B/176B      | -         |
+| [ChatGLM3](https://huggingface.co/THUDM)                 | 6B                               | chatglm3  |
+| [Command-R](https://huggingface.co/CohereForAI)          | 35B/104B                         | cohere    |
+| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai)     | 7B/16B/67B/236B                  | deepseek  |
+| [Falcon](https://huggingface.co/tiiuae)                  | 7B/11B/40B/180B                  | falcon    |
+| [Gemma/CodeGemma](https://huggingface.co/google)         | 2B/7B                            | gemma     |
+| [GLM4](https://huggingface.co/THUDM)                     | 9B                               | glm4      |
+| [InternLM2](https://huggingface.co/internlm)             | 7B/20B                           | intern2   |
+| [LLaMA](https://github.com/facebookresearch/llama)       | 7B/13B/33B/65B                   | -         |
+| [LLaMA-2](https://huggingface.co/meta-llama)             | 7B/13B/70B                       | llama2    |
+| [LLaMA-3](https://huggingface.co/meta-llama)             | 8B/70B                           | llama3    |
+| [LLaVA-1.5](https://huggingface.co/llava-hf)             | 7B/13B                           | vicuna    |
+| [Mistral/Mixtral](https://huggingface.co/mistralai)      | 7B/8x7B/8x22B                    | mistral   |
+| [OLMo](https://huggingface.co/allenai)                   | 1B/7B                            | -         |
+| [PaliGemma](https://huggingface.co/google)               | 3B                               | gemma     |
+| [Phi-1.5/2](https://huggingface.co/microsoft)            | 1.3B/2.7B                        | -         |
+| [Phi-3](https://huggingface.co/microsoft)                | 4B/7B/14B                        | phi       |
+| [Qwen](https://huggingface.co/Qwen)                      | 1.8B/7B/14B/72B                  | qwen      |
+| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen)        | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | qwen      |
+| [StarCoder2](https://huggingface.co/bigcode)             | 3B/7B/15B                        | -         |
+| [XVERSE](https://huggingface.co/xverse)                  | 7B/13B/65B                       | xverse    |
+| [Yi (1/1.5)](https://huggingface.co/01-ai)               | 6B/9B/34B                        | yi        |
+| [Yi-VL](https://huggingface.co/01-ai)                    | 6B/34B                           | yi_vl     |
+| [Yuan](https://huggingface.co/IEITYuan)                  | 2B/51B/102B                      | yuan      |
 
 > [!NOTE]
-> **默认模块**应作为 `lora_target` 参数的默认值,可使用 `lora_target: all` 参数指定全部模块以取得更好的效果。
->
 > 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
 >
-> 请务必在训练和推理时使用**完全一致**的模板。
+> 请务必在训练和推理时采用**完全一致**的模板。
 
 项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。

@@ -6,7 +6,7 @@ quantization_bit: 4
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### ddp
 ddp_timeout: 180000000

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 loraplus_lr_ratio: 16.0
 
 ### dataset

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### ddp
 ddp_timeout: 180000000

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### ddp
 ddp_timeout: 180000000

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### ddp
 ddp_timeout: 180000000

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: dpo
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 pref_beta: 0.1
 pref_loss: sigmoid  # [sigmoid (dpo), orpo, simpo]

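The `pref_loss: sigmoid` option above selects the standard DPO objective. For reference, a minimal sketch of that loss in its usual Bradley-Terry form, with `beta` matching `pref_beta: 0.1`; this is the textbook formulation, not necessarily the project's exact implementation:

```python
import torch
import torch.nn.functional as F

def dpo_sigmoid_loss(
    policy_chosen_logps: torch.Tensor,
    policy_rejected_logps: torch.Tensor,
    ref_chosen_logps: torch.Tensor,
    ref_rejected_logps: torch.Tensor,
    beta: float = 0.1,  # corresponds to pref_beta above
) -> torch.Tensor:
    # implicit rewards are the beta-scaled log-prob ratios against the frozen reference
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)
    # sigmoid (Bradley-Terry) preference loss: push chosen above rejected
    return -F.logsigmoid(chosen_rewards - rejected_rewards).mean()
```
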
@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: kto
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: kto_en_demo

@@ -6,7 +6,7 @@ reward_model: saves/llama3-8b/lora/reward
 stage: ppo
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: pt
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: c4_demo

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: rm
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: dpo_en_demo

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -6,7 +6,7 @@ visual_inputs: true
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: mllm_demo

@@ -5,7 +5,7 @@ model_name_or_path: ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -5,7 +5,7 @@ model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -6,7 +6,7 @@ quantization_bit: 4
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -5,7 +5,7 @@ model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_target: all
 
 ### dataset
 dataset: identity,alpaca_en_demo

@@ -20,8 +20,6 @@ CHOICES = ["A", "B", "C", "D"]
 
 DATA_CONFIG = "dataset_info.json"
 
-DEFAULT_MODULE = defaultdict(str)
-
 DEFAULT_TEMPLATE = defaultdict(str)
 
 FILEEXT2TYPE = {

@@ -80,7 +78,6 @@ class DownloadSource(str, Enum):
 
 def register_model_group(
     models: Dict[str, Dict[DownloadSource, str]],
-    module: Optional[str] = None,
     template: Optional[str] = None,
     vision: bool = False,
 ) -> None:

@@ -91,8 +88,6 @@ def register_model_group(
         else:
             assert prefix == name.split("-")[0], "prefix should be identical."
         SUPPORTED_MODELS[name] = path
-    if module is not None:
-        DEFAULT_MODULE[prefix] = module
     if template is not None:
         DEFAULT_TEMPLATE[prefix] = template
     if vision:

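The two hunks above delete the per-model `module` bookkeeping from the registry. A self-contained sketch of the registration logic after this change, simplified from the diff (the `vision` flag is accepted but its handling is omitted here):

```python
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import Dict, Optional

SUPPORTED_MODELS = OrderedDict()
DEFAULT_TEMPLATE = defaultdict(str)

class DownloadSource(str, Enum):
    DEFAULT = "hf"
    MODELSCOPE = "ms"

def register_model_group(
    models: Dict[str, Dict[DownloadSource, str]],
    template: Optional[str] = None,
    vision: bool = False,  # handling omitted in this sketch
) -> None:
    # the registry now records only download paths and chat templates;
    # per-architecture LoRA module names (W_pack, query_key_value, c_attn, ...)
    # are obsolete because `lora_target: all` covers every linear layer
    prefix = None
    for name, path in models.items():
        if prefix is None:
            prefix = name.split("-")[0]
        else:
            assert prefix == name.split("-")[0], "prefix should be identical."
        SUPPORTED_MODELS[name] = path
    if template is not None:
        DEFAULT_TEMPLATE[prefix] = template
```
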
@@ -127,7 +122,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan-13B-Chat",
         },
     },
-    module="W_pack",
     template="baichuan",
 )

@@ -151,7 +145,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-13B-Chat",
         },
     },
-    module="W_pack",
     template="baichuan2",
 )

@@ -171,7 +164,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-7b1",
         },
     },
-    module="query_key_value",
 )
 

@@ -190,7 +182,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-7b1-mt",
         },
     },
-    module="query_key_value",
 )
 

@@ -229,7 +220,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "ZhipuAI/chatglm2-6b",
         }
     },
-    module="query_key_value",
     template="chatglm2",
 )

@@ -245,7 +235,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b",
         },
     },
-    module="query_key_value",
     template="chatglm3",
 )

@@ -344,7 +333,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-instruct",
         },
     },
-    module="Wqkv",
     template="dbrx",
 )

@@ -463,7 +451,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "modelscope/falcon-180B-chat",
         },
     },
-    module="query_key_value",
     template="falcon",
 )

@@ -512,7 +499,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat-1m",
         },
     },
-    module="query_key_value",
     template="glm4",
 )

@@ -559,7 +545,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-chat-20b",
         },
     },
-    module="wqkv",
     template="intern2",
 )

@@ -581,7 +566,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "DeepLang/LingoWhale-8B",
         }
     },
-    module="qkv_proj",
 )
 

@@ -868,7 +852,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-128k-instruct",
         },
     },
-    module="qkv_proj",
     template="phi",
 )

@@ -940,7 +923,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat-Int4",
         },
     },
-    module="c_attn",
     template="qwen",
 )

@@ -1153,7 +1135,6 @@ register_model_group(
             DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B-v2",
         },
     },
-    module="query,key_value",
     template="telechat",
 )

@@ -24,12 +24,7 @@ class FreezeArguments:
             "help": (
                 "Name(s) of trainable modules for freeze (partial-parameter) fine-tuning. "
                 "Use commas to separate multiple modules. "
-                "Use `all` to specify all the available modules. "
-                "LLaMA choices: [`mlp`, `self_attn`], "
-                "BLOOM & Falcon & ChatGLM choices: [`mlp`, `self_attention`], "
-                "Qwen choices: [`mlp`, `attn`], "
-                "InternLM2 choices: [`feed_forward`, `attention`], "
-                "Others choices: the same as LLaMA."
+                "Use `all` to specify all the available modules."
             )
         },
     )

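The freeze-tuning help text above amounts to toggling `requires_grad` by module name. A generic PyTorch sketch of that selection, where the module keys are illustrative:

```python
import torch.nn as nn

def apply_freeze_tuning(model: nn.Module, trainable_keys: list) -> None:
    # freeze everything, then re-enable parameters whose qualified name
    # contains one of the requested keys, e.g. ["mlp", "self_attn"]
    for name, param in model.named_parameters():
        param.requires_grad = any(key in name for key in trainable_keys)
```
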
@@ -79,13 +74,7 @@ class LoraArguments:
             "help": (
                 "Name(s) of target modules to apply LoRA. "
                 "Use commas to separate multiple modules. "
-                "Use `all` to specify all the linear modules. "
-                "LLaMA choices: [`q_proj`, `k_proj`, `v_proj`, `o_proj`, `gate_proj`, `up_proj`, `down_proj`], "
-                "BLOOM & Falcon & ChatGLM choices: [`query_key_value`, `dense`, `dense_h_to_4h`, `dense_4h_to_h`], "
-                "Baichuan choices: [`W_pack`, `o_proj`, `gate_proj`, `up_proj`, `down_proj`], "
-                "Qwen choices: [`c_attn`, `attn.c_proj`, `w1`, `w2`, `mlp.c_proj`], "
-                "InternLM2 choices: [`wqkv`, `wo`, `w1`, `w2`, `w3`], "
-                "Others choices: the same as LLaMA."
+                "Use `all` to specify all the linear modules."
             )
         },
     )

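With the per-model hints removed, `lora_target: all` has to be resolved against the loaded model at runtime. A rough sketch of how "all the linear modules" can be collected in plain PyTorch; this is an illustration of the idea, not the project's exact resolver (which typically also excludes the output head before injecting adapters):

```python
import torch.nn as nn

def find_all_linear_names(model: nn.Module) -> list:
    # gather the suffix names of every nn.Linear layer; these suffixes are
    # the candidate LoRA target modules that `all` expands to
    names = set()
    for name, module in model.named_modules():
        if isinstance(module, nn.Linear):
            names.add(name.split(".")[-1])
    return sorted(names)
```
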
@@ -8,7 +8,6 @@ from yaml import safe_dump, safe_load
 from ..extras.constants import (
     CHECKPOINT_NAMES,
     DATA_CONFIG,
-    DEFAULT_MODULE,
     DEFAULT_TEMPLATE,
     PEFT_METHODS,
     STAGES_USE_PAIR_DATA,

@@ -118,13 +117,6 @@ def get_model_info(model_name: str) -> Tuple[str, str, bool]:
     return get_model_path(model_name), get_template(model_name), get_visual(model_name)
 
 
-def get_module(model_name: str) -> str:
-    r"""
-    Gets the LoRA modules of this model.
-    """
-    return DEFAULT_MODULE.get(get_prefix(model_name), "all")
-
-
 def get_template(model_name: str) -> str:
     r"""
     Gets the template name if the model is a chat model.

@@ -8,7 +8,7 @@ from transformers.trainer import TRAINING_ARGS_NAME
 from ..extras.constants import PEFT_METHODS, TRAINING_STAGES
 from ..extras.misc import is_gpu_or_npu_available, torch_gc
 from ..extras.packages import is_gradio_available
-from .common import DEFAULT_CACHE_DIR, get_module, get_save_dir, load_config
+from .common import DEFAULT_CACHE_DIR, get_save_dir, load_config
 from .locales import ALERTS
 from .utils import abort_leaf_process, gen_cmd, get_eval_results, get_trainer_info, load_args, save_args, save_cmd

@@ -159,7 +159,7 @@ class Runner:
             args["create_new_adapter"] = get("train.create_new_adapter")
             args["use_rslora"] = get("train.use_rslora")
             args["use_dora"] = get("train.use_dora")
-            args["lora_target"] = get("train.lora_target") or get_module(model_name)
+            args["lora_target"] = get("train.lora_target") or "all"
             args["additional_target"] = get("train.additional_target") or None
 
             if args["use_llama_pro"]: