LLaMA-Factory/examples/train_lora/llama3_lora_pretrain.yaml


### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
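# Hugging Face Hub ID of the base model; a local checkpoint directory works here as well.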

### method
stage: pt
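# "pt" selects (continual) pre-training: a plain causal language-modeling loss on raw text, no chat template.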
do_train: true
finetuning_type: lora
lora_target: all
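# "all" attaches LoRA adapters to every supported linear layer; a comma-separated list of module names restricts the targets.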

### dataset
dataset: c4_demo
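# c4_demo is a small demo slice of the C4 corpus shipped with the repo and registered in data/dataset_info.json.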
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/pretrain
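# LoRA adapter weights and trainer checkpoints are written here every save_steps steps.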
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
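# Effective batch size = 1 (per device) x 8 (accumulation) x number of GPUs, i.e. 8 sequences per optimizer step on a single GPU.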
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
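# Generous torch.distributed timeout so idle ranks do not abort while rank 0 preprocesses the dataset.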

### eval
val_size: 0.1
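# Holds out 10% of the dataset as a validation split, evaluated every eval_steps steps.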
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
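
# Example launch, assuming LLaMA-Factory is installed and the command is run from the repository root:
#   llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml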