Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-14 23:58:11 +08:00)
resolve conflict
Former-commit-id: d6168da2a1f74424b83416cbcbf685861e76ff5f
This commit is contained in:
parent 484128b641
commit 24d3c7e378
@@ -103,12 +103,7 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
         setattr(processor, "video_resolution", model_args.video_resolution)
         setattr(processor, "video_fps", model_args.video_fps)
         setattr(processor, "video_maxlen", model_args.video_maxlen)
-        if getattr(config, "model_type", None) == "qwen2_vl":
-            setattr(processor, "video_factor", 2)
-        else:
-            setattr(processor, "video_factor", 1)
-    except Exception as e:
-        print(e)
+    except Exception:
         processor = None
 
     # Avoid load tokenizer, see:
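Net effect of the hunk: the Qwen2-VL-specific video_factor attribute is no longer set here, and a failure while building the processor now falls back silently to None instead of printing the exception. A minimal sketch of the resulting control flow, assuming the processor is loaded with AutoProcessor.from_pretrained and that init_kwargs exists (neither appears in this hunk; the helper name build_processor is hypothetical):

from transformers import AutoProcessor

def build_processor(model_args, init_kwargs):
    """Hypothetical helper mirroring the patched try/except in load_tokenizer."""
    try:
        # Assumed loading call; only the setattr and except lines appear in the hunk.
        processor = AutoProcessor.from_pretrained(model_args.model_name_or_path, **init_kwargs)
        setattr(processor, "video_resolution", model_args.video_resolution)
        setattr(processor, "video_fps", model_args.video_fps)
        setattr(processor, "video_maxlen", model_args.video_maxlen)
    except Exception:
        # Before this commit, the try block also set video_factor (2 for qwen2_vl,
        # else 1) and the handler printed the exception before falling through.
        processor = None
    return processor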