Mirror of https://github.com/hiyouga/LLaMA-Factory.git,
synced 2025-12-15 11:20:35 +08:00.
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
23 lines
592 B
YAML
23 lines
592 B
YAML
# Hugging Face Accelerate launch config for FSDP (fully sharded data parallel) training.
# Use with: accelerate launch --config_file this_file.yaml train.py ...
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'  # quoted deliberately: a bare `no` parses as boolean false in YAML 1.1
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_cpu_ram_efficient_loading: true
  fsdp_offload_params: false
  fsdp_reshard_after_forward: true
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_version: 2
machine_rank: 0
main_training_function: main
mixed_precision: bf16  # or fp16
num_machines: 1  # the number of nodes
num_processes: 2  # the number of GPUs in all nodes
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false