From b2dc6dc59a95f4f8c52d2642c363fb3ff09b3ab3 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Tue, 8 Oct 2024 17:48:56 +0800
Subject: [PATCH] tiny fix

Former-commit-id: d8ddd07c2ed14d871fb25743c20265fc99e3e221
---
 requirements.txt                               | 2 +-
 src/llamafactory/__init__.py                   | 6 +++---
 src/llamafactory/data/template.py              | 4 +---
 src/llamafactory/extras/misc.py                | 2 +-
 src/llamafactory/model/loader.py               | 6 +++---
 src/llamafactory/model/model_utils/longlora.py | 2 +-
 src/llamafactory/model/model_utils/packing.py  | 2 +-
 src/llamafactory/train/tuner.py                | 2 +-
 8 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index e913c58d..69489bec 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-transformers>=4.41.2,<=4.45.0
+transformers>=4.41.2,<=4.45.2
 datasets>=2.16.0,<=2.21.0
 accelerate>=0.30.1,<=0.34.2
 peft>=0.11.1,<=0.12.0
diff --git a/src/llamafactory/__init__.py b/src/llamafactory/__init__.py
index 5293c512..ffc8c9ad 100644
--- a/src/llamafactory/__init__.py
+++ b/src/llamafactory/__init__.py
@@ -20,7 +20,7 @@ Level:
 
 Dependency graph:
   main:
-    transformers>=4.41.2,<=4.45.0
+    transformers>=4.41.2,<=4.45.2
     datasets>=2.16.0,<=2.21.0
     accelerate>=0.30.1,<=0.34.2
     peft>=0.11.1,<=0.12.0
@@ -28,9 +28,9 @@ Dependency graph:
   attention:
     transformers>=4.42.4 (gemma+fa2)
   longlora:
-    transformers>=4.41.2,<=4.45.0
+    transformers>=4.41.2,<=4.45.2
   packing:
-    transformers>=4.41.2,<=4.45.0
+    transformers>=4.41.2,<=4.45.2
 
 Disable version checking: DISABLE_VERSION_CHECK=1
 Enable VRAM recording: RECORD_VRAM=1
diff --git a/src/llamafactory/data/template.py b/src/llamafactory/data/template.py
index e832e3ad..bfa987d9 100644
--- a/src/llamafactory/data/template.py
+++ b/src/llamafactory/data/template.py
@@ -357,9 +357,7 @@ def get_template_and_fix_tokenizer(tokenizer: "PreTrainedTokenizer", data_args:
     Gets chat template and fixes the tokenizer.
     """
     if data_args.template in ["llava", "paligemma", "qwen2_vl"]:
-        require_version(
-            "transformers>=4.45.0.dev0", "To fix: pip install git+https://github.com/huggingface/transformers.git"
-        )
+        require_version("transformers>=4.45.0", "To fix: pip install transformers>=4.45.0")
         require_version("accelerate>=0.34.0", "To fix: pip install accelerate>=0.34.0")
 
     if data_args.template is None:
diff --git a/src/llamafactory/extras/misc.py b/src/llamafactory/extras/misc.py
index de034ef6..7d0a457a 100644
--- a/src/llamafactory/extras/misc.py
+++ b/src/llamafactory/extras/misc.py
@@ -79,7 +79,7 @@ def check_dependencies() -> None:
     if os.environ.get("DISABLE_VERSION_CHECK", "0").lower() in ["true", "1"]:
         logger.warning("Version checking has been disabled, may lead to unexpected behaviors.")
     else:
-        require_version("transformers>=4.41.2,<=4.45.0", "To fix: pip install transformers>=4.41.2,<=4.45.0")
+        require_version("transformers>=4.41.2,<=4.45.2", "To fix: pip install transformers>=4.41.2,<=4.45.2")
         require_version("datasets>=2.16.0,<=2.21.0", "To fix: pip install datasets>=2.16.0,<=2.21.0")
         require_version("accelerate>=0.30.1,<=0.34.2", "To fix: pip install accelerate>=0.30.1,<=0.34.2")
         require_version("peft>=0.11.1,<=0.12.0", "To fix: pip install peft>=0.11.1,<=0.12.0")
diff --git a/src/llamafactory/model/loader.py b/src/llamafactory/model/loader.py
index ee09db43..fb71498b 100644
--- a/src/llamafactory/model/loader.py
+++ b/src/llamafactory/model/loader.py
@@ -83,7 +83,7 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
             **init_kwargs,
         )
     except Exception as e:
-        raise OSError("Failed to load tokenizer") from e
+        raise OSError("Failed to load tokenizer.") from e
 
     if model_args.new_special_tokens is not None:
         num_added_tokens = tokenizer.add_special_tokens(
@@ -100,12 +100,12 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
         processor = AutoProcessor.from_pretrained(model_args.model_name_or_path, **init_kwargs)
         patch_processor(processor, config, tokenizer, model_args)
     except Exception as e:
-        logger.warning("Failed to load processor. Error: {}".format(e))
+        logger.warning("Processor was not found: {}.".format(e))
         processor = None
 
     # Avoid load tokenizer, see:
     # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/auto/processing_auto.py#L324
-    if processor and "Processor" not in processor.__class__.__name__:
+    if processor is not None and "Processor" not in processor.__class__.__name__:
         processor = None
 
     return {"tokenizer": tokenizer, "processor": processor}
diff --git a/src/llamafactory/model/model_utils/longlora.py b/src/llamafactory/model/model_utils/longlora.py
index b341653a..e87e5b8b 100644
--- a/src/llamafactory/model/model_utils/longlora.py
+++ b/src/llamafactory/model/model_utils/longlora.py
@@ -353,7 +353,7 @@ def llama_sdpa_attention_forward(
 
 
 def _apply_llama_patch() -> None:
-    require_version("transformers>=4.41.2,<=4.45.0", "To fix: pip install transformers>=4.41.2,<=4.45.0")
+    require_version("transformers>=4.41.2,<=4.45.2", "To fix: pip install transformers>=4.41.2,<=4.45.2")
     LlamaAttention.forward = llama_attention_forward
     LlamaFlashAttention2.forward = llama_flash_attention_2_forward
     LlamaSdpaAttention.forward = llama_sdpa_attention_forward
diff --git a/src/llamafactory/model/model_utils/packing.py b/src/llamafactory/model/model_utils/packing.py
index d52731b8..2ae3a6ff 100644
--- a/src/llamafactory/model/model_utils/packing.py
+++ b/src/llamafactory/model/model_utils/packing.py
@@ -114,7 +114,7 @@ def get_unpad_data(attention_mask: "torch.Tensor") -> Tuple["torch.Tensor", "tor
 
 
 def _patch_for_block_diag_attn(model_type: str) -> None:
-    require_version("transformers>=4.41.2,<=4.45.0", "To fix: pip install transformers>=4.41.2,<=4.45.0")
+    require_version("transformers>=4.41.2,<=4.45.2", "To fix: pip install transformers>=4.41.2,<=4.45.2")
     if is_transformers_version_greater_than_4_43():
         import transformers.modeling_flash_attention_utils
 
diff --git a/src/llamafactory/train/tuner.py b/src/llamafactory/train/tuner.py
index 3c8a3b13..1fd202aa 100644
--- a/src/llamafactory/train/tuner.py
+++ b/src/llamafactory/train/tuner.py
@@ -140,4 +140,4 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None:
             )
 
     except Exception as e:
-        logger.warning("Cannot save tokenizer, please copy the files manually. Error: {}".format(e))
+        logger.warning("Cannot save tokenizer, please copy the files manually: {}.".format(e))