[npu] add Qwen3.5 support with Partial RoPE and Hybrid Attention (#10421)

Co-authored-by: Curnane <mingliangfu@users.noreply.github.com>
Author: curnane-lab
Date: 2026-04-27 23:36:07 +08:00
Committed by: GitHub
Parent: 99464b3d03
Commit: 2092abc217
4 changed files with 224 additions and 43 deletions

@@ -0,0 +1,20 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
  fsdp_version: 2
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: Qwen3_5DecoderLayer,Qwen3_5VisionBlock
  fsdp_cpu_ram_efficient_loading: true
  fsdp_offload_params: false
  fsdp_reshard_after_forward: true
  fsdp_state_dict_type: FULL_STATE_DICT
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 8 # Change to match your NPU count (e.g., 8 for A2, 16 for A3)
rdzv_backend: static
same_network: true
use_cpu: false
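
With TRANSFORMER_BASED_WRAP, fsdp_transformer_layer_cls_to_wrap lists both the language decoder layer and the vision block, so FSDP shards both towers of the multimodal model. The file is a standard Hugging Face Accelerate config and would typically be passed to the launcher, e.g. accelerate launch --config_file fsdp_config.yaml train.py (the file and script names here are illustrative, not taken from this commit).

The commit title also names Partial RoPE: rotary position embeddings applied to only a leading fraction of each attention head's channels, with the rest passed through unrotated. Below is a minimal PyTorch sketch of that technique, assuming the common partial_rotary_factor convention; none of these identifiers come from the diff itself.

    import torch

    def rope_cos_sin(seq_len: int, rotary_dim: int, base: float = 10000.0):
        # Precompute cos/sin tables for the rotated channels only.
        inv_freq = 1.0 / (base ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
        t = torch.arange(seq_len).float()
        freqs = torch.outer(t, inv_freq)         # (seq, rotary_dim // 2)
        emb = torch.cat((freqs, freqs), dim=-1)  # (seq, rotary_dim)
        return emb.cos(), emb.sin()

    def rotate_half(x):
        # Standard RoPE helper: split the last dim in half, swap with a sign flip.
        x1, x2 = x.chunk(2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)

    def apply_partial_rope(q, k, cos, sin, rotary_dim):
        # Rotate only the first `rotary_dim` channels of each head; the
        # remaining channels carry no positional signal and pass through.
        q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
        k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
        q_rot = q_rot * cos + rotate_half(q_rot) * sin
        k_rot = k_rot * cos + rotate_half(k_rot) * sin
        return torch.cat((q_rot, q_pass), dim=-1), torch.cat((k_rot, k_pass), dim=-1)

    # Example: head_dim = 64 with partial_rotary_factor = 0.5 -> rotary_dim = 32.
    q = torch.randn(1, 8, 128, 64)  # (batch, heads, seq, head_dim)
    k = torch.randn(1, 8, 128, 64)
    cos, sin = rope_cos_sin(seq_len=128, rotary_dim=32)
    q, k = apply_partial_rope(q, k, cos, sin, rotary_dim=32)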