Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-17 17:18:10 +08:00)

update examples

Former-commit-id: 194e25606515bfa42c3be27d68f68d604191514b
parent 3168abc0a1
commit 6b407092d9
examples/full_multi_gpu/multi_node.sh (new file, +37 lines)
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+python -m torch.distributed.run \
+    --nproc_per_node $NPROC_PER_NODE \
+    --nnodes $NNODES \
+    --node_rank $RANK \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    ../../src/train_bash.py \
+    --deepspeed ds_z3_config.json \
+    --stage sft \
+    --do_train \
+    --model_name_or_path meta-llama/Llama-2-7b-hf \
+    --dataset alpaca_gpt4_en \
+    --dataset_dir ../../data \
+    --template default \
+    --finetuning_type full \
+    --output_dir ../../saves/LLaMA2-7B/full/sft \
+    --overwrite_cache \
+    --overwrite_output_dir \
+    --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
+    --per_device_train_batch_size 1 \
+    --per_device_eval_batch_size 1 \
+    --gradient_accumulation_steps 2 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --warmup_steps 20 \
+    --save_steps 100 \
+    --eval_steps 100 \
+    --evaluation_strategy steps \
+    --learning_rate 5e-5 \
+    --num_train_epochs 3.0 \
+    --max_samples 3000 \
+    --val_size 0.1 \
+    --plot_loss \
+    --fp16
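For reference, the launcher above expects its rendezvous variables to be exported on every node before the script runs; a minimal sketch for a two-node setup (all values here are illustrative, not part of this commit):

    # on each node, set the shared rendezvous info before launching
    export NPROC_PER_NODE=4            # GPUs used per node
    export NNODES=2                    # total number of nodes
    export RANK=0                      # 0 on the master node, 1 on the other
    export MASTER_ADDR=192.168.0.1     # address of the rank-0 node
    export MASTER_PORT=29500           # any free port, identical on all nodes
    bash multi_node.sh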
@@ -13,11 +13,13 @@ deepspeed --num_gpus 4 ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 2 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
+    --warmup_steps 20 \
     --save_steps 100 \
     --eval_steps 100 \
     --evaluation_strategy steps \
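The same two flags are inserted into every example script touched by this commit; in standard HF Trainer / datasets terms they mean:

    --preprocessing_num_workers 16 \   # tokenize the dataset with 16 worker processes
    --warmup_steps 20 \                # ramp the learning rate up over the first 20 steps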
examples/lora_multi_gpu/master_config.yaml (new file, +18 lines)
@@ -0,0 +1,18 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+distributed_type: MULTI_GPU
+downcast_bf16: 'no'
+gpu_ids: all
+machine_rank: 0
+main_process_ip: 192.168.0.1
+main_process_port: 29555
+main_training_function: main
+mixed_precision: fp16
+num_machines: 2
+num_processes: 16
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
@@ -1,6 +1,8 @@
 #!/bin/bash
 
-CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch --config_file config.yaml ../../src/train_bash.py \
+CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
+    --config_file master_config.yaml \
+    ../../src/train_bash.py \
     --stage sft \
     --do_train \
     --model_name_or_path meta-llama/Llama-2-7b-hf \
@@ -13,11 +15,13 @@ CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch --config_file config.yaml ../../s
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 2 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
+    --warmup_steps 20 \
     --save_steps 100 \
     --eval_steps 100 \
     --evaluation_strategy steps \
examples/lora_multi_gpu/single_node.sh (new file, +34 lines)
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 accelerate launch \
+    --config_file single_config.yaml \
+    ../../src/train_bash.py \
+    --stage sft \
+    --do_train \
+    --model_name_or_path meta-llama/Llama-2-7b-hf \
+    --dataset alpaca_gpt4_en,glaive_toolcall \
+    --dataset_dir ../../data \
+    --template default \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --output_dir ../../saves/LLaMA2-7B/lora/sft \
+    --overwrite_cache \
+    --overwrite_output_dir \
+    --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
+    --per_device_train_batch_size 1 \
+    --per_device_eval_batch_size 1 \
+    --gradient_accumulation_steps 2 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --warmup_steps 20 \
+    --save_steps 100 \
+    --eval_steps 100 \
+    --evaluation_strategy steps \
+    --load_best_model_at_end \
+    --learning_rate 5e-5 \
+    --num_train_epochs 3.0 \
+    --max_samples 3000 \
+    --val_size 0.1 \
+    --plot_loss \
+    --fp16
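single_config.yaml itself is not part of this diff; presumably (an assumption, not confirmed by this commit) it is the single-machine counterpart of the node configs in this directory:

    # hypothetical sketch of single_config.yaml, mirroring master_config.yaml
    # with the multi-machine fields collapsed to one host:
    #   num_machines: 1      # a single node
    #   num_processes: 8     # one process per GPU made visible above
    #   machine_rank: 0
    # main_process_ip / main_process_port are unnecessary on a single machine.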
examples/lora_multi_gpu/slave_config.yaml (new file, +18 lines)
@@ -0,0 +1,18 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+distributed_type: MULTI_GPU
+downcast_bf16: 'no'
+gpu_ids: all
+machine_rank: 1
+main_process_ip: 192.168.0.1
+main_process_port: 29555
+main_training_function: main
+mixed_precision: fp16
+num_machines: 2
+num_processes: 16
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
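Taken together, the two configs describe a two-machine accelerate job rendezvousing at 192.168.0.1:29555, driven by the multi-node launcher modified above (presumably examples/lora_multi_gpu/multi_node.sh). A hedged sketch of how they pair up, assuming both hosts run from that directory:

    # on the master node (machine_rank 0, 192.168.0.1):
    bash multi_node.sh                  # picks up master_config.yaml
    # on the worker node (machine_rank 1): the same script, pointed at the
    # slave config, e.g. by swapping the --config_file argument on the fly:
    sed 's/master_config.yaml/slave_config.yaml/' multi_node.sh | bash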
@@ -15,11 +15,13 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 8 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
+    --warmup_steps 20 \
     --save_steps 100 \
     --eval_steps 100 \
     --evaluation_strategy steps \
@@ -16,6 +16,7 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 512 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --gradient_accumulation_steps 8 \
     --lr_scheduler_type cosine \
@@ -13,6 +13,7 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_eval_batch_size 1 \
     --max_samples 20 \
     --predict_with_generate
@@ -12,11 +12,13 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 8 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
+    --warmup_steps 20 \
     --save_steps 100 \
     --eval_steps 100 \
     --evaluation_strategy steps \
@@ -15,11 +15,13 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 8 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
+    --warmup_steps 20 \
     --save_steps 100 \
     --eval_steps 100 \
     --evaluation_strategy steps \
@@ -13,11 +13,13 @@ CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
+    --preprocessing_num_workers 16 \
     --per_device_train_batch_size 1 \
     --per_device_eval_batch_size 1 \
     --gradient_accumulation_steps 8 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
+    --warmup_steps 20 \
     --save_steps 100 \
     --eval_steps 100 \
     --evaluation_strategy steps \
|
Loading…
x
Reference in New Issue
Block a user