Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-14 23:58:11 +08:00)
fix tokenizer padding side in evaluate.py
Former-commit-id: bcb43ff8ba1946c1f7e7865c9d0fb47ba276935d
parent d602f06882
commit c0658711ca
@@ -122,7 +122,7 @@ def evaluate(
         checkpoint_dir=checkpoint_dir,
         template=template
     ))
-    chat_model.tokenizer.padding_side = "left" # avoid overflow issue in batched inference for llama2
+    chat_model.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2
     eval_template = eval_templates[lang]
 
     category_corrects: Dict[str, np.ndarray] = {
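For context (not part of the commit), here is a minimal sketch of what `padding_side` changes when a Hugging Face tokenizer pads a batch; the checkpoint name and prompts below are illustrative and not taken from evaluate.py:

from transformers import AutoTokenizer

# Illustrative checkpoint; the commit itself flips this setting on the tokenizer
# that ChatModel loads inside evaluate.py.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
tokenizer.pad_token = tokenizer.eos_token  # LLaMA-2 tokenizers ship without a pad token

prompts = ["A", "A much longer prompt that needs padding"]

# Right padding (the behavior after this commit): pad tokens are appended
# after the real tokens of the shorter sequence.
tokenizer.padding_side = "right"
right_batch = tokenizer(prompts, padding=True, return_tensors="pt")

# Left padding (the previous behavior): pad tokens are prepended instead.
tokenizer.padding_side = "left"
left_batch = tokenizer(prompts, padding=True, return_tensors="pt")

print(right_batch["input_ids"][0])  # real tokens first, then pad ids
print(left_batch["input_ids"][0])   # pad ids first, then real tokens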