Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-23 14:22:51 +08:00)
resolve conflict
Former-commit-id: 96decf82b872a4ec06450b4440336d38475d1d02
This commit is contained in:
parent f00f4ae9b6
commit 5c53cf3244
@@ -103,12 +103,7 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
         setattr(processor, "video_resolution", model_args.video_resolution)
         setattr(processor, "video_fps", model_args.video_fps)
         setattr(processor, "video_maxlen", model_args.video_maxlen)
-        if getattr(config, "model_type", None) == "qwen2_vl":
-            setattr(processor, "video_factor", 2)
-        else:
-            setattr(processor, "video_factor", 1)
-    except Exception as e:
-        print(e)
+    except Exception:
+        processor = None
 
     # Avoid load tokenizer, see:
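For context, a minimal sketch of what the processor setup looks like after this change. It assumes the surrounding code loads a multimodal processor with transformers.AutoProcessor inside the same try block; the helper name, its arguments, and the tokenizer attachment line are illustrative and not copied from the repository.

    from transformers import AutoProcessor

    def build_processor(model_args, tokenizer, init_kwargs):
        # Hypothetical helper mirroring the post-change shape of load_tokenizer:
        # the processor is optional, so any failure falls back to None.
        try:
            processor = AutoProcessor.from_pretrained(model_args.model_name_or_path, **init_kwargs)
            setattr(processor, "tokenizer", tokenizer)  # assumed to exist outside the visible hunk
            setattr(processor, "video_resolution", model_args.video_resolution)
            setattr(processor, "video_fps", model_args.video_fps)
            setattr(processor, "video_maxlen", model_args.video_maxlen)
        except Exception:
            # Text-only models have no processor; the Qwen2-VL "video_factor"
            # special case and the print(e) debug output are removed by this commit.
            processor = None
        return processor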