Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-04 12:42:51 +08:00)
fix tokenizer padding side in evaluate.py
Former-commit-id: 641ffa2f6ee05bda5b8548286c161c30aa0bcfb6
This commit is contained in:
parent 95697652f1
commit 77781d9516
@@ -122,7 +122,7 @@ def evaluate(
         checkpoint_dir=checkpoint_dir,
         template=template
     ))
-    chat_model.tokenizer.padding_side = "left" # avoid overflow issue in batched inference for llama2
+    chat_model.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2
     eval_template = eval_templates[lang]
 
     category_corrects: Dict[str, np.ndarray] = {
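The change pins the tokenizer to right padding before batched evaluation, matching the in-code comment about avoiding an overflow issue with LLaMA-2. Below is a minimal standalone sketch (not part of this commit) of the same setting applied to a Hugging Face tokenizer; the checkpoint name and prompts are placeholders chosen for illustration.

from transformers import AutoTokenizer

# Hypothetical checkpoint; any LLaMA-2-style tokenizer works the same way.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
tokenizer.padding_side = "right"  # same setting the commit applies in evaluate.py
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # LLaMA tokenizers ship without a pad token

# Batched encoding: the shorter prompt is padded on the right.
batch = tokenizer(
    ["What is 2 + 2?", "Name the capital of France."],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["attention_mask"].shape)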