fix WebChatModel flash_attn argument

Former-commit-id: 4c71c314fb23e0e1eb294abf0d6c4cccfb531716
hoshi-hiyouga 2024-04-24 13:54:21 +08:00 committed by GitHub
parent 4a854dfe27
commit ce36e316bc


@@ -72,7 +72,7 @@ class WebChatModel(ChatModel):
             finetuning_type=get("top.finetuning_type"),
             quantization_bit=int(get("top.quantization_bit")) if get("top.quantization_bit") in ["8", "4"] else None,
             template=get("top.template"),
-            flash_attn=(get("top.booster") == "flash_attn"),
+            flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto",
             use_unsloth=(get("top.booster") == "unsloth"),
             rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") in ["linear", "dynamic"] else None,
             infer_backend=get("infer.infer_backend"),