Update sft.sh

Former-commit-id: 57dcd91e17833a0eeb8d99af92ac73c132a77648
hoshi-hiyouga 2024-04-16 17:25:40 +08:00 committed by GitHub
parent 4133811434
commit 507ab397f5

sft.sh

@@ -1,7 +1,6 @@
-# BAdam layer-wise
-export CUDA_VISIBLE_DEVICES=0
-export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
-python ../../../src/train_bash.py \
+#!/bin/bash
+
+CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \
     --stage sft \
     --do_train \
     --model_name_or_path meta-llama/Llama-2-7b-hf \
@@ -9,14 +8,18 @@ python ../../../src/train_bash.py \
     --dataset_dir ../../../data \
     --template default \
     --finetuning_type full \
-    --output_dir ../../../saves/LLaMA2-7B/badam \
+    --use_badam \
+    --badam_switch_mode descending \
+    --badam_switch_block_every 50 \
+    --badam_verbose 2 \
+    --output_dir ../../../saves/LLaMA2-7B/badam/sft \
     --overwrite_cache \
     --overwrite_output_dir \
     --cutoff_len 1024 \
-    --preprocessing_num_workers 32 \
-    --per_device_train_batch_size 8 \
-    --per_device_eval_batch_size 5 \
-    --gradient_accumulation_steps 2 \
+    --preprocessing_num_workers 16 \
+    --per_device_train_batch_size 1 \
+    --per_device_eval_batch_size 1 \
+    --gradient_accumulation_steps 8 \
     --lr_scheduler_type cosine \
     --logging_steps 10 \
     --warmup_steps 20 \
@@ -26,10 +29,7 @@ python ../../../src/train_bash.py \
     --load_best_model_at_end \
     --learning_rate 5e-5 \
     --num_train_epochs 3.0 \
     --max_samples 3000 \
     --val_size 0.1 \
     --plot_loss \
-    --use_badam \
-    --switch_mode descending \
-    --badam_verbose 2 \
-    --switch_block_every 50
+    --pure_bf16
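
For context, the renamed options configure BAdam's block-wise updates: only one block of layers is trainable at a time, and the active block rotates every --badam_switch_block_every steps in the order given by --badam_switch_mode. Below is a rough, self-contained PyTorch sketch of that idea, not BAdam's actual implementation; the toy model, data, and training loop are invented purely for illustration.

# Conceptual sketch only -- not BAdam's implementation. It illustrates the idea
# behind "--badam_switch_mode descending" and "--badam_switch_block_every 50":
# parameters are split into per-layer blocks, a single block is trainable at a
# time, and the trainable block changes every 50 optimizer steps, starting from
# the last layer. The toy model and data below are invented for illustration.
import torch
import torch.nn as nn

model = nn.Sequential(*[nn.Linear(16, 16) for _ in range(4)])  # 4 toy "layers"
blocks = [list(layer.parameters()) for layer in model]         # one block per layer
switch_every = 50                                              # --badam_switch_block_every 50
order = list(range(len(blocks)))[::-1]                         # descending: last block first

def activate(block_idx: int) -> None:
    # Freeze every parameter except those in the selected block.
    for i, block in enumerate(blocks):
        for p in block:
            p.requires_grad_(i == block_idx)

optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
x, y = torch.randn(8, 16), torch.randn(8, 16)

for step in range(200):
    active = order[(step // switch_every) % len(order)]  # block trainable at this step
    activate(active)
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()           # AdamW skips parameters that received no gradient
    optimizer.zero_grad()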