From 37c6a0c6dc89211d8567552bd397c0f3c9ee0a89 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Mon, 22 Jul 2024 11:28:31 +0800
Subject: [PATCH] fix #4917

Former-commit-id: 26082fc6c90e6a399ae5b44f2c3df8019afc7766
---
 src/llamafactory/model/patcher.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/llamafactory/model/patcher.py b/src/llamafactory/model/patcher.py
index cc233311..b694d726 100644
--- a/src/llamafactory/model/patcher.py
+++ b/src/llamafactory/model/patcher.py
@@ -89,9 +89,6 @@ def patch_config(
     if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn == "fa2":
         setattr(config, "use_cache", False)  # qwen2 does not support use_cache when using flash attn
 
-    if getattr(config, "model_type", None) == "chatglm":
-        require_version("transformers==4.41.2", "To fix: pip install transformers==4.41.2")
-
     # deepspeed zero3 is not compatible with low_cpu_mem_usage
     init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())
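
Note: for context, a minimal sketch of the guard this patch removes, assuming the real
require_version helper from transformers.utils.versions (it raises ImportError with the
given hint when the installed version does not satisfy the pin). The function name
check_chatglm_pin is hypothetical, used here only to isolate the deleted logic:

    from transformers.utils.versions import require_version

    def check_chatglm_pin(config) -> None:
        # Hypothetical wrapper around the deleted lines: before this patch,
        # loading a chatglm model hard-pinned transformers to exactly 4.41.2;
        # after it, any installed transformers version is accepted.
        if getattr(config, "model_type", None) == "chatglm":
            require_version("transformers==4.41.2", "To fix: pip install transformers==4.41.2")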