Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2026-01-12 17:10:36 +08:00
[fix] correct ktransformers example config paths and templates (#9732)

This commit fixes the KTransformers example configs in three ways: every kt_optimize_rule path now points under examples/ktransformers/kt_optimize_rules/, the DeepSeek-V3 configs switch from the deepseek template to deepseek3, and the demo training configs extend dataset from identity to identity, alpaca_en_demo.
@@ -5,6 +5,6 @@ infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
 
 use_kt: true # use KTransformers as LoRA sft backend to inference
-kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
 cpu_infer: 32
 chunk_size: 8192
@@ -1,9 +1,9 @@
 model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
-template: deepseek
+template: deepseek3
 infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
 
 use_kt: true # use KTransformers as LoRA sft backend to inference
-kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
 cpu_infer: 32
 chunk_size: 8192
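Read as a whole, the corrected DeepSeek-V3 inference example now looks like this (reconstructed directly from the hunk above; nothing beyond the nine lines shown is assumed):

model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
template: deepseek3  # DeepSeek-V3 chat template; plain `deepseek` is the V2-era template
infer_backend: ktransformers  # choices: [huggingface, vllm, sglang, ktransformers]
trust_remote_code: true

use_kt: true  # use KTransformers as LoRA sft backend to inference
kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
cpu_infer: 32
chunk_size: 8192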
@@ -1,10 +1,10 @@
 model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
 adapter_name_or_path: saves/Kllama_deepseekV3
-template: deepseek
+template: deepseek3
 infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
 
 use_kt: true # use KTransformers as LoRA sft backend to inference
-kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
 cpu_infer: 32
 chunk_size: 8192
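This second inference example additionally loads a LoRA adapter from saves/Kllama_deepseekV3, presumably the output_dir of the corresponding SFT run. A minimal sketch of the pairing, assuming the training config writes its adapter there (the output_dir line itself is not visible in this diff):

# training side (assumed excerpt; output_dir is not shown in this commit)
output_dir: saves/Kllama_deepseekV3

# inference side (from the hunk above): base model plus the trained adapter
model_name_or_path: opensourcerelease/DeepSeek-V3-bf16
adapter_name_or_path: saves/Kllama_deepseekV3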
@@ -5,6 +5,6 @@ infer_backend: ktransformers # choices: [huggingface, vllm, sglang, ktransformers]
 trust_remote_code: true
 
 use_kt: true # use KTransformers as LoRA sft backend to inference
-kt_optimize_rule: examples/kt_optimize_rules/Qwen3Moe-sft-amx.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/Qwen3Moe-sft-amx.yaml
 cpu_infer: 32
 chunk_size: 8192
@@ -10,7 +10,7 @@ lora_rank: 8
 lora_target: all
 
 ### dataset
-dataset: identity
+dataset: identity, alpaca_en_demo
 template: deepseek
 cutoff_len: 2048
 max_samples: 100000
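In LLaMA-Factory, dataset takes a comma-separated list of names registered in data/dataset_info.json; identity and alpaca_en_demo are demo datasets shipped with the repo. The post-patch dataset block of this training example, with that reading spelled out:

### dataset
dataset: identity, alpaca_en_demo  # comma-separated names from data/dataset_info.json
template: deepseek                 # the DeepSeek-V2-Lite example keeps the deepseek template
cutoff_len: 2048
max_samples: 100000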
@@ -40,7 +40,7 @@ resume_from_checkpoint: null
 
 ### ktransformers
 use_kt: true # use KTransformers as LoRA sft backend
-kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
 cpu_infer: 32
 chunk_size: 8192
 
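For reference, the ktransformers block as it reads after the patch. The inline notes on cpu_infer and chunk_size are a best-effort reading of the KTransformers settings, not taken from this diff:

### ktransformers
use_kt: true  # route the LoRA SFT forward/backward through KTransformers
kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V2-Lite-Chat-sft-amx.yaml
cpu_infer: 32     # CPU threads for the CPU-offloaded (AMX) expert kernels -- assumed meaning
chunk_size: 8192  # tokens processed per prefill chunk -- assumed meaning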
@@ -10,8 +10,8 @@ lora_rank: 8
 lora_target: all
 
 ### dataset
-dataset: identity
-template: deepseek
+dataset: identity, alpaca_en_demo
+template: deepseek3
 cutoff_len: 2048
 max_samples: 100000
 overwrite_cache: true
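Note that this DeepSeek-V3 training config and the two V3 inference configs above all move to deepseek3 together: the chat template used at SFT time must match the one used at inference, otherwise the prompt formatting the adapter was trained on differs from what it sees at generation time.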
@@ -40,7 +40,7 @@ resume_from_checkpoint: null
 
 ### ktransformers
 use_kt: true # use KTransformers as LoRA sft backend
-kt_optimize_rule: examples/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/DeepSeek-V3-Chat-sft-amx-multi-gpu.yaml
 cpu_infer: 32
 chunk_size: 8192
 
@@ -40,7 +40,7 @@ resume_from_checkpoint: null
 
 ### ktransformers
 use_kt: true # use KTransformers as LoRA sft backend
-kt_optimize_rule: examples/kt_optimize_rules/Qwen3Moe-sft-amx.yaml
+kt_optimize_rule: examples/ktransformers/kt_optimize_rules/Qwen3Moe-sft-amx.yaml
 cpu_infer: 32
 chunk_size: 8192
 
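After the patch, every kt_optimize_rule in these examples resolves under examples/ktransformers/kt_optimize_rules/, matching where the rule files live in the repo. The training examples are normally launched with llamafactory-cli train <config>.yaml and the inference examples with llamafactory-cli chat <config>.yaml (LLaMA-Factory's standard entry points; the file names of the configs touched here are not shown in this diff).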