From cdf9dae53efa7a014330bf7cdcc65b9e3d696997 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Mon, 22 Jul 2024 11:28:31 +0800
Subject: [PATCH] fix #4917

Former-commit-id: e26919aafd8436489d065789c9c25d72c8d05a6d
---
 src/llamafactory/model/patcher.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/llamafactory/model/patcher.py b/src/llamafactory/model/patcher.py
index cc233311..b694d726 100644
--- a/src/llamafactory/model/patcher.py
+++ b/src/llamafactory/model/patcher.py
@@ -89,9 +89,6 @@ def patch_config(
     if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn == "fa2":
         setattr(config, "use_cache", False)  # qwen2 does not support use_cache when using flash attn
 
-    if getattr(config, "model_type", None) == "chatglm":
-        require_version("transformers==4.41.2", "To fix: pip install transformers==4.41.2")
-
     # deepspeed zero3 is not compatible with low_cpu_mem_usage
     init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())
 