From 5c53cf324485927d10e5464dd5bda2cdc5a7a0ad Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Tue, 10 Sep 2024 12:39:17 +0800 Subject: [PATCH] resolve conflict Former-commit-id: 96decf82b872a4ec06450b4440336d38475d1d02 --- src/llamafactory/model/loader.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/llamafactory/model/loader.py b/src/llamafactory/model/loader.py index 6b0fa719..96fb5760 100644 --- a/src/llamafactory/model/loader.py +++ b/src/llamafactory/model/loader.py @@ -103,12 +103,7 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule": setattr(processor, "video_resolution", model_args.video_resolution) setattr(processor, "video_fps", model_args.video_fps) setattr(processor, "video_maxlen", model_args.video_maxlen) - if getattr(config, "model_type", None) == "qwen2_vl": - setattr(processor, "video_factor", 2) - else: - setattr(processor, "video_factor", 1) - except Exception as e: - print(e) + except Exception: processor = None # Avoid load tokenizer, see: