mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2026-01-07 06:30:36 +08:00)
### Note: DO NOT use a quantized model or quantization_bit when merging LoRA adapters

### model
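### (annotation, assuming the usual merge-LoRA setup)
### model_name_or_path: full-precision base model to merge into
### adapter_name_or_path: LoRA checkpoint produced by the SFT run
### template: should match the template used during fine-tuning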
model_name_or_path: Qwen/Qwen3-4B-Instruct-2507
adapter_name_or_path: saves/qwen3-4b/lora/sft
template: qwen3_nothink
trust_remote_code: true

### export
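### (annotation, assuming current LLaMA-Factory export option semantics)
### export_size: maximum shard size in GB for the saved weights
### export_device: cpu runs the merge on the CPU; auto picks an available accelerator
### export_legacy_format: false writes safetensors shards instead of legacy .bin files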
export_dir: saves/qwen3_sft_merged
export_size: 5
export_device: cpu # choices: [cpu, auto]
export_legacy_format: false
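### Usage sketch (file path below is illustrative):
###   llamafactory-cli export path/to/this_config.yaml
### The merged model is written to export_dir and can be loaded as a standard Hugging Face
### model directory.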