Former-commit-id: 26082fc6c90e6a399ae5b44f2c3df8019afc7766
This commit is contained in:
hoshi-hiyouga 2024-07-22 11:28:31 +08:00 committed by GitHub
parent dbe26e7cdf
commit 37c6a0c6dc

View File

@@ -89,9 +89,6 @@ def patch_config(
if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn == "fa2":
setattr(config, "use_cache", False) # qwen2 does not support use_cache when using flash attn
if getattr(config, "model_type", None) == "chatglm":
require_version("transformers==4.41.2", "To fix: pip install transformers==4.41.2")
# deepspeed zero3 is not compatible with low_cpu_mem_usage
init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())