Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-14 19:06:26 +08:00)
@@ -122,7 +122,7 @@ def evaluate(
         checkpoint_dir=checkpoint_dir,
         template=template
     ))
-    chat_model.tokenizer.padding_side = "left" # avoid overflow issue in batched inference for llama2
+    chat_model.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2
     eval_template = eval_templates[lang]
 
     category_corrects: Dict[str, np.ndarray] = {
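
The changed line's comment gives the motivation: during batched inference the padding side determines where each prompt's last real token sits, and the comment attributes an overflow issue with Llama 2 to the previous setting. Below is a minimal sketch, not the repository's actual evaluator code, of how right padding pairs with the attention mask to pick out each prompt's last-token logits in a batched forward pass; the checkpoint name is a placeholder and any causal LM would do.

# Minimal sketch (assumptions: placeholder checkpoint, generic causal LM),
# not LLaMA-Factory's own evaluation loop.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Llama-2-7b-hf"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"  # mirrors the setting introduced in this commit

prompts = ["The answer is", "A considerably longer prompt whose answer is"]
batch = tokenizer(prompts, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(**batch).logits  # shape: (batch, seq_len, vocab)

# With right padding, each prompt's last non-padding token is at index
# (number of real tokens - 1), recoverable from the attention mask.
lengths = batch["attention_mask"].sum(dim=-1)
last_token_logits = logits[torch.arange(len(prompts)), lengths - 1]

The same index arithmetic would be wrong under left padding, where the real tokens end at the final position for every row; whichever side is used, the position lookup and the padding side have to agree.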