LLaMA-Factory/examples/megatron/qwen2_vl_full.yaml

### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
image_max_pixels: 262144
video_max_pixels: 16384

### method
stage: sft
do_train: true
finetuning_type: full  # only full fine-tuning is supported for now

### dataset
dataset: llava_1k_en
template: qwen2_vl
cutoff_len: 4096
preprocessing_num_workers: 8

### output
output_dir: saves/mca/qwen2_vl_full
logging_steps: 1
save_steps: 100

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 2e-5
num_train_epochs: 2
lr_scheduler_type: cosine
bf16: true
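
# Sanity check on the effective batch size (a sketch, not part of the original file,
# assuming the usual Trainer semantics where per_device_train_batch_size applies per
# data-parallel rank):
#   global_batch = per_device_train_batch_size x gradient_accumulation_steps x data_parallel_size
#   data_parallel_size = world_size / (tensor_model_parallel_size x pipeline_model_parallel_size)
# e.g. on 8 GPUs with the mcore settings below: 1 x 2 x 1 = 2 samples per optimizer step.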
### mcore speed up
tensor_model_parallel_size: 4    # shard each transformer layer across 4 GPUs
sequence_parallel: true          # also shard activations along the sequence dim within the tensor-parallel group
pipeline_model_parallel_size: 2  # split the layer stack into 2 pipeline stages
bias_activation_fusion: true     # fuse the bias add and activation into one kernel
apply_rope_fusion: true          # use the fused RoPE kernel
use_distributed_optimizer: true  # shard optimizer states across data-parallel ranks
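
# Parallelism note (not part of the original file): tensor_model_parallel_size (4) times
# pipeline_model_parallel_size (2) gives 8 model-parallel ranks, so the world size must be
# a multiple of 8.
#
# Hypothetical launch on a single 8-GPU node, assuming the standard LLaMA-Factory CLI entry
# point (the exact launcher for mcore_adapter training may differ):
#   FORCE_TORCHRUN=1 llamafactory-cli train examples/megatron/qwen2_vl_full.yaml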