From 24d3c7e378a2bdda614cefd81de464e1a6091b18 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Tue, 10 Sep 2024 12:39:17 +0800 Subject: [PATCH] resolve conflict Former-commit-id: d6168da2a1f74424b83416cbcbf685861e76ff5f --- src/llamafactory/model/loader.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/llamafactory/model/loader.py b/src/llamafactory/model/loader.py index 6b0fa719..96fb5760 100644 --- a/src/llamafactory/model/loader.py +++ b/src/llamafactory/model/loader.py @@ -103,12 +103,7 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule": setattr(processor, "video_resolution", model_args.video_resolution) setattr(processor, "video_fps", model_args.video_fps) setattr(processor, "video_maxlen", model_args.video_maxlen) - if getattr(config, "model_type", None) == "qwen2_vl": - setattr(processor, "video_factor", 2) - else: - setattr(processor, "video_factor", 1) - except Exception as e: - print(e) + except Exception: processor = None # Avoid load tokenizer, see: