# Start FSDP2 full fine-tuning on Ascend NPU.
#
# Usage:
#   accelerate launch \
#     --config_file examples/accelerate/fsdp2_config_qwen35.yaml \
#     src/train.py examples/ascend/qwen3_5_full_sft_fsdp2.yaml
#
# Note: change `num_processes` in fsdp2_config_qwen35.yaml to match your NPU count.
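
# The accelerate file referenced above is not reproduced here; as a rough sketch,
# an accelerate FSDP2 config of this kind typically looks like the following
# (field names from accelerate's FSDP plugin, values are illustrative assumptions):
#
#   compute_environment: LOCAL_MACHINE
#   distributed_type: FSDP
#   fsdp_config:
#     fsdp_version: 2
#     fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
#     fsdp_reshard_after_forward: true
#     fsdp_state_dict_type: FULL_STATE_DICT
#   machine_rank: 0
#   num_machines: 1
#   num_processes: 8  # one process per NPU
#   mixed_precision: bf16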
### model
model_name_or_path: Qwen/Qwen3.5-4B
trust_remote_code: true
use_v1_kernels: true
flash_attn: fa2  # FlashAttention-2; on Ascend this presumably dispatches to the NPU fused attention op

### method
stage: sft
do_train: true
finetuning_type: full  # full-parameter update (vs. lora / freeze)

### dataset
dataset: alpaca_en_demo  # small demo dataset bundled with LLaMA-Factory
template: qwen3_5_nothink
cutoff_len: 2048  # truncate each example to 2048 tokens
max_samples: 1000  # cap the dataset for a quick demo run
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/Qwen3.5-4B/full/sft
logging_steps: 1
save_steps: 500  # equal to max_steps, so only the final checkpoint is saved
max_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none  # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 8
gradient_accumulation_steps: 1
learning_rate: 1.0e-5
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 1800  # collective-communication timeout in seconds
resume_from_checkpoint: null
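
# With these settings the effective global batch size is
# per_device_train_batch_size * gradient_accumulation_steps * num_processes,
# e.g. 8 * 1 * 8 = 64 sequences per optimizer step on an 8-NPU node, and
# warmup_ratio 0.1 over max_steps 500 gives 50 warmup steps before cosine decay.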