[feat] fp8 training (#8960)

Co-authored-by: Benjamin Feuer <penfever@gmail.com>
Co-authored-by: Yaowei Zheng <hiyouga@buaa.edu.cn>
Ben Feuer authored on 2025-09-30 23:32:53 -07:00, committed by GitHub
parent a756f1aff8
commit 29baefbc1a
9 changed files with 367 additions and 3 deletions

@@ -0,0 +1,45 @@
{
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_clipping": "auto",
  "zero_allow_untested_optimizer": true,
  "zero_force_ds_cpu_optimizer": true,
  "fp16": {
    "enabled": false,
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "initial_scale_power": 16,
    "hysteresis": 2,
    "min_loss_scale": 1
  },
  "bf16": {
    "enabled": "auto"
  },
  "zero_optimization": {
    "stage": 3,
    "offload_optimizer": {
      "device": "cpu",
      "pin_memory": false
    },
    "overlap_comm": false,
    "contiguous_gradients": true,
    "sub_group_size": 1000000000,
    "reduce_bucket_size": 12845056,
    "stage3_prefetch_bucket_size": 11560550,
    "stage3_param_persistence_threshold": 35840,
    "stage3_max_live_parameters": 1000000000,
    "stage3_max_reuse_distance": 1000000000,
    "stage3_gather_16bit_weights_on_model_save": true
  },
  "steps_per_print": 10000000,
  "gradient_accumulation_steps": "auto",
  "comms_config": {
    "verbose": false
  },
  "monitor_config": {
    "enabled": true,
    "tag": "DeepSpeedMonitor",
    "csv_monitor": {
      "enabled": false
    }
  }
}
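
This ZeRO-3 config deliberately leaves low-precision handling to Accelerate/TorchAO: fp16 loss scaling is disabled, bf16 is left on "auto" so it is resolved from the training arguments, and DeepSpeed only contributes parameter/optimizer sharding with CPU offload. A minimal, purely illustrative check of that split (the path is the one referenced by the example YAML below):

```python
# Illustrative only: confirm the DeepSpeed side stays out of FP8 handling,
# which is delegated to HuggingFace Accelerate / TorchAO.
import json

with open("examples/deepspeed/ds_z3_fp8_config.json") as f:
    ds_cfg = json.load(f)

assert ds_cfg["fp16"]["enabled"] is False          # no fp16 loss scaling
assert ds_cfg["bf16"]["enabled"] == "auto"         # resolved from training args
assert ds_cfg["zero_optimization"]["stage"] == 3   # ZeRO-3 sharding + CPU offload
```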

@@ -0,0 +1,48 @@
# FP8 training example with DeepSpeed ZeRO-3
# This config demonstrates FP8 mixed precision training using HuggingFace Accelerate
# with DeepSpeed providing memory optimization (not FP8 handling)

### Model configuration
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### Method configuration
stage: sft
do_train: true
finetuning_type: full

### Dataset configuration
dataset: identity
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### Output configuration
output_dir: saves/llama3-8b/fp8-deepspeed/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### Training configuration
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true

### FP8 configuration
fp8: true
fp8_backend: torchao # Use TorchAO backend for FP8
fp8_enable_fsdp_float8_all_gather: false # Not used with DeepSpeed

### DeepSpeed configuration
deepspeed: examples/deepspeed/ds_z3_fp8_config.json

### Logging configuration
report_to: wandb
run_name: llama3_fp8_deepspeed_sft
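
Most keys in this YAML map directly onto HuggingFace TrainingArguments, while the fp8* keys are LLaMA-Factory-level options that are turned into an Accelerate FP8 setup rather than being passed to the Trainer. A rough, illustrative mapping (not code from this commit) of the standard keys:

```python
# Rough sketch of how the YAML keys above correspond to HF TrainingArguments.
# fp8 / fp8_backend / fp8_enable_fsdp_float8_all_gather are handled by
# LLaMA-Factory's own argument parsing and Accelerate, not shown here.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="saves/llama3-8b/fp8-deepspeed/sft",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=5e-5,
    num_train_epochs=3.0,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    bf16=True,
    logging_steps=10,
    save_steps=500,
    report_to="wandb",
    run_name="llama3_fp8_deepspeed_sft",
    deepspeed="examples/deepspeed/ds_z3_fp8_config.json",  # ZeRO-3 config shown above
)
```

As with the other examples in the repository, the YAML itself is launched through the CLI, e.g. `llamafactory-cli train <path-to-this-yaml>`.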

@@ -0,0 +1,51 @@
# FP8 training example with FSDP
# This config demonstrates FP8 mixed precision training using HuggingFace Accelerate
# with FSDP for distributed training and float8 all-gather optimization

### Model configuration
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### Method configuration
stage: sft
do_train: true
finetuning_type: full

### Dataset configuration
dataset: identity
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### Output configuration
output_dir: saves/llama3-8b/fp8-fsdp/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### Training configuration
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true

### FP8 configuration
fp8: true
fp8_backend: torchao # Use TorchAO backend for FP8
fp8_enable_fsdp_float8_all_gather: true # Enable FSDP2 float8 all-gather optimization

### FSDP configuration (using training arguments - no separate FSDP config file)
fsdp:
- full_shard
- auto_wrap
fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer

### Logging configuration
report_to: wandb
run_name: llama3_fp8_fsdp_sft
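
With `fp8_backend: torchao`, eligible `nn.Linear` modules are swapped for float8 training layers, and `fp8_enable_fsdp_float8_all_gather: true` lets FSDP2 all-gather parameters already cast to float8 instead of bf16, cutting communication volume. The sketch below shows the roughly equivalent direct TorchAO call; it is illustrative only, since LLaMA-Factory performs this conversion through HuggingFace Accelerate rather than calling TorchAO itself:

```python
# Illustrative sketch of what the two FP8 flags above control.
import torch
from torchao.float8 import Float8LinearConfig, convert_to_float8_training
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16
)

# Mirrors fp8_enable_fsdp_float8_all_gather: with FSDP2, parameters are
# all-gathered in float8 rather than bf16.
fp8_config = Float8LinearConfig(enable_fsdp_float8_all_gather=True)

# Swap eligible nn.Linear layers for their float8 training counterparts,
# keeping the LM head in higher precision.
convert_to_float8_training(
    model,
    config=fp8_config,
    module_filter_fn=lambda module, name: "lm_head" not in name,
)
```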