Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-02 19:52:50 +08:00)

Merge pull request #6246 from hiyouga/hiyouga/update_examples

[examples] update examples

Former-commit-id: 967a6c12a72b97104bd262b0f2ed945a8b0cd81d

Commit 7f8c59144e
@@ -126,14 +126,14 @@ llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
 #### Supervised Fine-Tuning on Single Node
 
 ```bash
-FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
 ```
 
 #### Supervised Fine-Tuning on Multiple Nodes
 
 ```bash
-FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
-FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
+FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
 ```
 
 #### Multimodal Supervised Fine-Tuning
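For context on the multi-node lines above: `FORCE_TORCHRUN=1` makes the CLI launch through torchrun, and `NNODES`/`RANK`/`MASTER_ADDR`/`MASTER_PORT` are forwarded to it. A rough sketch of the equivalent raw torchrun invocation, assuming 8 GPUs per node and the repo's `src/train.py` entry point (both assumptions, not part of this diff):

```bash
# Equivalent raw launch for the rank-0 node (sketch; assumes 8 GPUs per node
# and that src/train.py is the training entry point).
torchrun --nnodes 2 --node_rank 0 --nproc_per_node 8 \
  --master_addr 192.168.0.1 --master_port 29500 \
  src/train.py examples/train_full/llama3_full_sft.yaml
# On the second node, run the same command with --node_rank 1.
```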
@@ -126,14 +126,14 @@ llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
 #### Supervised Fine-Tuning on Single Node
 
 ```bash
-FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
 ```
 
 #### Supervised Fine-Tuning on Multiple Nodes
 
 ```bash
-FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
-FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
+FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
 ```
 
 #### Multimodal Supervised Fine-Tuning
@@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 stage: sft
 do_train: true
 finetuning_type: full
-deepspeed: examples/deepspeed/ds_z3_config.json
+deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
 
 ### dataset
 dataset: identity,alpaca_en_demo
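The new inline comment only lists the shipped ZeRO configs; as background, stage 0 is plain data parallelism, stage 2 shards optimizer state and gradients, and stage 3 also shards the parameters. A minimal sketch of what such a ZeRO-3 JSON can look like (illustrative only, not the repo's actual ds_z3_config.json):

```bash
# Write an illustrative ZeRO-3 config; "auto" fields are resolved by the
# HF Trainer integration from the training arguments.
cat > ds_z3_sketch.json <<'EOF'
{
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "bf16": { "enabled": "auto" },
  "zero_optimization": {
    "stage": 3,
    "overlap_comm": true,
    "stage3_gather_16bit_weights_on_model_save": true
  }
}
EOF
```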
@@ -5,10 +5,11 @@ model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
 stage: sft
 do_train: true
 finetuning_type: full
-deepspeed: examples/deepspeed/ds_z3_config.json
+freeze_vision_tower: true # choices: [true, false]
+deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
 
 ### dataset
-dataset: mllm_demo,identity
+dataset: mllm_demo,identity,alpaca_en_demo
 template: qwen2_vl
 cutoff_len: 2048
 max_samples: 1000
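`freeze_vision_tower: true` keeps the ViT encoder fixed, so this "full" fine-tune updates only the language-model side, which saves memory. To experiment with unfreezing it, one option is to flip the flag in place; the YAML path below is an assumption based on this hunk, and the snippet assumes the mikefarah `yq` tool is installed:

```bash
# Flip the new flag and relaunch (hypothetical YAML path; requires yq).
yq -i '.freeze_vision_tower = false' examples/train_full/qwen2vl_full_sft.yaml
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml
```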
@@ -26,7 +27,7 @@ overwrite_output_dir: true
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 2
 learning_rate: 1.0e-5
-num_train_epochs: 3.0
+num_train_epochs: 30.0
 lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true
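The jump from 3 to 30 epochs fits the tiny demo datasets this example trains on. A back-of-envelope check, assuming 8 GPUs and roughly 1,000 training samples (both assumptions):

```bash
# Effective batch = per_device (1) x grad_accum (2) x GPUs (8) = 16,
# so one epoch is ceil(1000 / 16) = 63 optimizer steps and 30 epochs ~ 1890.
python3 - <<'EOF'
import math
per_device, grad_accum, gpus, samples, epochs = 1, 2, 8, 1000, 30
steps = math.ceil(samples / (per_device * grad_accum * gpus))
print(steps, steps * epochs)  # 63 1890
EOF
```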
@@ -1,40 +0,0 @@
-### model
-model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
-
-### method
-stage: sft
-do_train: true
-finetuning_type: lora
-lora_target: all
-deepspeed: examples/deepspeed/ds_z0_config.json
-
-### dataset
-dataset: identity,alpaca_en_demo
-template: llama3
-cutoff_len: 2048
-max_samples: 1000
-overwrite_cache: true
-preprocessing_num_workers: 16
-
-### output
-output_dir: saves/llama3-8b/lora/sft
-logging_steps: 10
-save_steps: 500
-plot_loss: true
-overwrite_output_dir: true
-
-### train
-per_device_train_batch_size: 1
-gradient_accumulation_steps: 2
-learning_rate: 1.0e-4
-num_train_epochs: 3.0
-lr_scheduler_type: cosine
-warmup_ratio: 0.1
-bf16: true
-ddp_timeout: 180000000
-
-### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
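This deletion removes the dedicated LoRA + ZeRO-0 example rather than the capability: the inline `choices:` comments added above let one example file cover all three ZeRO stages. A sketch for reproducing the deleted setup from the surviving example (the file path is an assumption based on the hunk context):

```bash
# Point the surviving LoRA example at the ZeRO-0 config and launch
# (hypothetical file path; adjust to the actual example location).
sed -i 's|ds_z3_config.json|ds_z0_config.json|' \
  examples/train_lora/llama3_lora_sft_ds3.yaml
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
```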
@@ -6,7 +6,7 @@ stage: sft
 do_train: true
 finetuning_type: lora
 lora_target: all
-deepspeed: examples/deepspeed/ds_z3_config.json
+deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
 
 ### dataset
 dataset: identity,alpaca_en_demo
@@ -8,7 +8,7 @@ finetuning_type: lora
 lora_target: all
 
 ### dataset
-dataset: mllm_demo,identity # video: mllm_video_demo
+dataset: mllm_demo,identity,alpaca_en_demo # video: mllm_video_demo
 template: qwen2_vl
 cutoff_len: 2048
 max_samples: 1000
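The trailing `# video: mllm_video_demo` comment points at the bundled video demo set; to try it, swap the dataset list before launching (sketch; the YAML path is an assumption):

```bash
# Switch the example to the video demo dataset (hypothetical path).
sed -i 's/^dataset: .*/dataset: mllm_video_demo/' \
  examples/train_lora/qwen2vl_lora_sft.yaml
llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
```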