fix quant infer and qwen2moe

Former-commit-id: b75d16767f35c36e2cf2aaab8a3844135085bccf
hiyouga 2024-04-09 17:12:59 +08:00
parent 6030a4a720
commit 566d71b7a9
2 changed files with 3 additions and 3 deletions


@@ -109,9 +109,6 @@ def load_model(
     if not is_trainable:
         model.requires_grad_(False)
         model.eval()
-        for param in model.parameters():
-            if param.device.type == "cuda":
-                param.data = param.data.to(model_args.compute_dtype)
     else:
         model.train()

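Context for the first hunk: bitsandbytes-quantized weights are stored on the GPU as packed integer tensors, so the removed loop, which blindly cast every CUDA parameter to the compute dtype, corrupted those weights during quantized inference. A minimal sketch (not the repository's code; cast_non_quantized is a hypothetical helper) of a cast that would leave quantized weights intact:

import torch

def cast_non_quantized(model: torch.nn.Module, compute_dtype: torch.dtype) -> None:
    # Cast only floating-point parameters; quantized weights (e.g. the packed
    # torch.uint8 storage used by bitsandbytes 4-bit layers) are skipped so
    # their packed representation is not destroyed.
    for param in model.parameters():
        if param.device.type == "cuda" and param.data.dtype.is_floating_point:
            param.data = param.data.to(compute_dtype)
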

@@ -316,6 +316,9 @@ def patch_config(
     if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn:
         setattr(config, "use_cache", False)  # qwen2 does not support use_cache when using flashattn
 
+    if getattr(config, "model_type", None) == "qwen2_moe" and is_trainable:
+        setattr(config, "output_router_logits", True)
+
     init_kwargs["torch_dtype"] = model_args.compute_dtype
     if not is_deepspeed_zero3_enabled():
         init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage