Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-14 19:06:26 +08:00
@@ -25,10 +25,10 @@ overwrite_output_dir: true
 ### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
-learning_rate: 0.0001
+learning_rate: 1.0e-4
 num_train_epochs: 3.0
 lr_scheduler_type: cosine
-warmup_steps: 0.1
+warmup_ratio: 0.1
 fp16: true

 ### eval
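Note: 0.0001 and 1.0e-4 are the same number, so the learning_rate line is purely a notation change; the configs standardize on scientific notation. One pitfall worth keeping in mind when editing these values by hand (a minimal sketch, assuming the config is loaded with PyYAML, which resolves scalars under YAML 1.1 rules): scientific notation is only recognized as a float when the mantissa contains a decimal point.

    import yaml  # PyYAML resolves scalars under YAML 1.1 rules

    # Both of these spellings load as the float 0.0001:
    print(yaml.safe_load("learning_rate: 0.0001"))  # {'learning_rate': 0.0001}
    print(yaml.safe_load("learning_rate: 1.0e-4"))  # {'learning_rate': 0.0001}

    # Dropping the decimal point silently yields a string, not a float:
    print(yaml.safe_load("learning_rate: 1e-4"))    # {'learning_rate': '1e-4'}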
@@ -25,10 +25,10 @@ overwrite_output_dir: true
 ### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
-learning_rate: 0.0001
+learning_rate: 1.0e-4
 num_train_epochs: 3.0
 lr_scheduler_type: cosine
-warmup_steps: 0.1
+warmup_ratio: 0.1
 fp16: true

 ### eval
@@ -26,10 +26,10 @@ overwrite_output_dir: true
 ### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
-learning_rate: 0.0001
+learning_rate: 1.0e-4
 num_train_epochs: 3.0
 lr_scheduler_type: cosine
-warmup_steps: 0.1
+warmup_ratio: 0.1
 fp16: true

 ### eval
@@ -25,10 +25,10 @@ overwrite_output_dir: true
 ### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
-learning_rate: 0.0001
+learning_rate: 1.0e-4
 num_train_epochs: 3.0
 lr_scheduler_type: cosine
-warmup_steps: 0.1
+warmup_ratio: 0.1
 fp16: true

 ### eval
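The warmup change is the actual fix: warmup_steps is an integer count of optimizer steps, so a fractional value such as 0.1 is not meaningful there; a warmup expressed as a fraction of total training belongs in warmup_ratio. A sketch of how the two options are resolved, modeled on the documented behavior of TrainingArguments.get_warmup_steps in transformers (the helper name and the step count below are illustrative stand-ins):

    import math

    def resolve_warmup_steps(warmup_steps: int, warmup_ratio: float,
                             num_training_steps: int) -> int:
        # Illustrative helper, not the library function itself.
        # An explicit warmup_steps > 0 takes precedence; otherwise the
        # ratio is scaled by the total number of optimizer steps.
        if warmup_steps > 0:
            return warmup_steps
        return math.ceil(num_training_steps * warmup_ratio)

    # E.g., if 3 epochs at batch size 1 with gradient accumulation 8 work
    # out to 1,000 optimizer steps (hypothetical), warmup_ratio: 0.1 means:
    print(resolve_warmup_steps(0, 0.1, 1000))  # -> 100 warmup steps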