# pip install git+https://github.com/hiyouga/transformers.git@llama4_train

### model
model_name_or_path: meta-llama/Llama-4-Scout-17B-16E-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: lora
lora_rank: 8
lora_target: all
deepspeed: examples/deepspeed/ds_z3_config.json  # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]

### dataset
dataset: mllm_demo,identity,alpaca_en_demo
template: llama4
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama4-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null

### eval
# eval_dataset: alpaca_en_demo
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
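
### usage
# A minimal launch sketch, assuming this recipe is consumed by LLaMA-Factory's
# `llamafactory-cli train` entry point (the config keys above follow its format);
# the file path below is a hypothetical save location, adjust it to where this
# config actually lives:
#   llamafactory-cli train examples/train_lora/llama4_lora_sft.yaml
# Note: with per_device_train_batch_size: 1 and gradient_accumulation_steps: 8,
# each device accumulates 8 samples per optimizer step, so the effective global
# batch size is 8 x (number of devices).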