Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2026-05-05 07:38:55 +08:00)
[refactor] Add KTransformers AMX MoE SFT support via Accelerate (#10430)
Co-authored-by: mrhaoxx <mr.haoxx@gmail.com>
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
@@ -19,7 +19,7 @@ preprocessing_num_workers: 16
 dataloader_num_workers: 4

 ### output
-output_dir: saves/Kllama_deepseekV2
+output_dir: saves/KT_FT_deepseekV2
 logging_steps: 10
 save_steps: 500
 plot_loss: true
@@ -39,14 +39,7 @@ ddp_timeout: 180000000
 resume_from_checkpoint: null

 ### ktransformers
-use_kt: true # use KTransformers as LoRA sft backend
-kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
-cpu_infer: 32
-chunk_size: 8192
-
-### eval
-# eval_dataset: alpaca_en_demo
-# val_size: 0.1
-# per_device_eval_batch_size: 1
-# eval_strategy: steps
-# eval_steps: 500
+use_kt: true
+# Pair with fsdp2_kt_bf16.yaml for original BF16 checkpoints.
+# For pre-converted expert weights, uncomment kt_weight_path and use fsdp2_kt_int8.yaml or fsdp2_kt_int4.yaml.
+# kt_weight_path: /path/to/DeepSeek-V2-Lite-AMXINT8
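The rewritten ktransformers block above hands distributed setup to an Accelerate FSDP2 launcher config instead of the old kt_optimize_rule/cpu_infer/chunk_size options. The referenced examples/ktransformers/accelerate/fsdp2_kt_bf16.yaml is not shown in this diff; the YAML below is only a rough sketch of what a typical Accelerate FSDP2 config looks like, and every key in it is an assumption rather than the file's actual contents.

# Hedged sketch of an Accelerate FSDP2 launcher config (not the shipped fsdp2_kt_bf16.yaml).
compute_environment: LOCAL_MACHINE
distributed_type: FSDP
mixed_precision: bf16              # BF16 variant; the int8/int4 variants presumably differ elsewhere
fsdp_config:
  fsdp_version: 2                  # FSDP2, as the file names suggest
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_reshard_after_forward: true
  fsdp_offload_params: false
  fsdp_state_dict_type: SHARDED_STATE_DICT
num_machines: 1
num_processes: 2                   # number of GPUs; adjust to the actual machine
machine_rank: 0
main_training_function: main
rdzv_backend: static
same_network: true
use_cpu: false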
@@ -1,5 +1,5 @@
 ### model
-model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
+model_name_or_path: deepseek-ai/DeepSeek-V3-0324-BF16 # need to convert to BF16 checkpoint first
 trust_remote_code: true

 ### method
@@ -19,7 +19,7 @@ preprocessing_num_workers: 16
 dataloader_num_workers: 4

 ### output
-output_dir: saves/Kllama_deepseekV3
+output_dir: saves/KT_FT_deepseekV3
 logging_steps: 10
 save_steps: 500
 plot_loss: true
@@ -39,14 +39,7 @@ ddp_timeout: 180000000
 resume_from_checkpoint: null

 ### ktransformers
-use_kt: true # use KTransformers as LoRA sft backend
-kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
-cpu_infer: 32
-chunk_size: 8192
-
-### eval
-# eval_dataset: alpaca_en_demo
-# val_size: 0.1
-# per_device_eval_batch_size: 1
-# eval_strategy: steps
-# eval_steps: 500
+use_kt: true
+# Pair with fsdp2_kt_bf16.yaml for original BF16 checkpoints.
+# For pre-converted expert weights, uncomment kt_weight_path and use fsdp2_kt_int8.yaml or fsdp2_kt_int4.yaml.
+# kt_weight_path: /path/to/DeepSeek-V3-AMXINT8
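Both DeepSeek recipes drop their commented-out eval block in these hunks. If periodic evaluation is still wanted, the removed options can simply be re-enabled in the training YAML; the values below are the ones from the deleted comments, with the caveat (an assumption about LLaMA-Factory's data arguments, not stated in this commit) that you would typically pick either eval_dataset or val_size, not both.

### eval
eval_dataset: alpaca_en_demo   # or use val_size to split the training data instead
# val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500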
@@ -0,0 +1,46 @@
+### model
+model_name_or_path: Qwen/Qwen3.5-397B-A17B
+trust_remote_code: true
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_rank: 8
+lora_target: all
+
+### dataset
+dataset: identity, alpaca_en_demo
+template: qwen3_5
+cutoff_len: 2048
+max_samples: 100000
+overwrite_cache: true
+preprocessing_num_workers: 16
+dataloader_num_workers: 4
+
+### output
+output_dir: saves/KT_FT_qwen35Moe
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_only_model: false
+report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
+
+### train
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+resume_from_checkpoint: null
+
+### ktransformers
+use_kt: true
+# For original BF16 checkpoints, start with examples/ktransformers/accelerate/fsdp2_kt_bf16.yaml.
+# For pre-converted expert weights, uncomment kt_weight_path and use fsdp2_kt_int8.yaml or fsdp2_kt_int4.yaml.
+# Pair the 397B path with fsdp2_kt_int8.yaml, tune cutoff_len to prepared weights and GPU memory.
+# kt_weight_path: /path/to/Qwen3.5-MoE-AMXINT8
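The closing comments of this new recipe describe a second path that starts from pre-converted expert weights. Following those comments, the tail of the same config would presumably be edited as in the sketch below; the weight path is the placeholder from the comment itself, and the pairing with fsdp2_kt_int8.yaml is taken from the commit's own notes rather than verified here.

### ktransformers
use_kt: true
kt_weight_path: /path/to/Qwen3.5-MoE-AMXINT8   # placeholder path; point at the prepared AMX INT8 expert weights
# launch with examples/ktransformers/accelerate/fsdp2_kt_int8.yaml, per the comment above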
@@ -11,7 +11,7 @@ lora_target: all

 ### dataset
 dataset: identity, alpaca_en_demo
-template: qwen3_nothink
+template: qwen3
 cutoff_len: 2048
 max_samples: 100000
 overwrite_cache: true
@@ -19,9 +19,9 @@ preprocessing_num_workers: 16
 dataloader_num_workers: 4

 ### output
-output_dir: saves/Kllama_Qwen3MoE_235bA22b
+output_dir: saves/KT_FT_qwen3Moe
 logging_steps: 10
-save_steps: 200
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
 save_only_model: false
@@ -31,7 +31,7 @@ report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
 learning_rate: 1.0e-4
-num_train_epochs: 3
+num_train_epochs: 3.0
 lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true
@@ -39,14 +39,7 @@ ddp_timeout: 180000000
 resume_from_checkpoint: null

 ### ktransformers
-use_kt: true # use KTransformers as LoRA sft backend
-kt_optimize_rule: examples/ktransformers/kt_optimize_rules/Qwen3Moe-sft-amx.yaml
-cpu_infer: 32
-chunk_size: 8192
-
-### eval
-# eval_dataset: alpaca_en_demo
-# val_size: 0.1
-# per_device_eval_batch_size: 1
-# eval_strategy: steps
-# eval_steps: 500
+use_kt: true
+# Pair with examples/ktransformers/accelerate/fsdp2_kt_bf16.yaml for original BF16 checkpoints.
+# For pre-converted expert weights, uncomment kt_weight_path and use fsdp2_kt_int8.yaml or fsdp2_kt_int4.yaml.
+# kt_weight_path: /path/to/Qwen3-235B-A22B-Instruct-2507-AMXINT8
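Across all of the touched recipes these hunks retire the explicit kt_optimize_rule, cpu_infer, and chunk_size options in favor of the Accelerate configs. For readers unfamiliar with what kt_optimize_rule pointed at: KTransformers optimize-rule files are YAML lists of match/replace entries that swap model submodules for KTransformers operators. The entry below only illustrates that generic format, using class names from KTransformers' public examples; it is not the content of the removed Qwen3Moe-sft-amx.yaml rule.

# Illustrative KTransformers optimize-rule entry (generic format, not the actual sft-amx rule file).
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"    # regex over module names, matching MoE expert blocks
  replace:
    class: ktransformers.operators.experts.KTransformersExperts   # operator class as seen in KTransformers' examples
    kwargs:
      generate_device: "cpu"       # run expert GEMMs on CPU, where the AMX kernels apply
      generate_op: "KExpertsCPU"
      out_device: "cuda"
  recursive: false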