Merge pull request #5546 from chengchengpei/cpei/refactor

1. Log exceptions in detail; 2. Check that the processor is not None before using it.

Former-commit-id: f8a2cc61054cec215d658159dc2d6fbacf6624e9
hoshi-hiyouga 2024-10-08 17:46:54 +08:00 committed by GitHub
commit a95fe78ae2
2 changed files with 7 additions and 4 deletions
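For context on the first change, the pattern is plain Python exception handling: chaining with "raise ... from e" keeps the root cause in the traceback, while formatting the caught exception into the warning message records why a fallback happened instead of failing silently. A minimal sketch of both patterns, assuming a stand-in loader (load_resource, load_optional_resource, and the logger setup are illustrative, not code from this repository):

import logging

logger = logging.getLogger(__name__)


def load_resource(path):
    # Required resource: re-raise with context so the traceback keeps the root cause.
    try:
        return open(path, "rb")
    except Exception as e:
        raise OSError("Failed to load resource") from e


def load_optional_resource(path):
    # Optional resource: log the detailed error and fall back to None instead of raising.
    try:
        return open(path, "rb")
    except Exception as e:
        logger.warning("Failed to load optional resource. Error: {}".format(e))
        return None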


@@ -82,6 +82,8 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
             padding_side="right",
             **init_kwargs,
         )
+    except Exception as e:
+        raise OSError("Failed to load tokenizer") from e
 
     if model_args.new_special_tokens is not None:
         num_added_tokens = tokenizer.add_special_tokens(
@@ -97,12 +99,13 @@ def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
     try:
         processor = AutoProcessor.from_pretrained(model_args.model_name_or_path, **init_kwargs)
         patch_processor(processor, config, tokenizer, model_args)
-    except Exception:
+    except Exception as e:
+        logger.warning("Failed to load processor. Error: {}".format(e))
         processor = None
 
     # Avoid load tokenizer, see:
     # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/auto/processing_auto.py#L324
-    if "Processor" not in processor.__class__.__name__:
+    if processor and "Processor" not in processor.__class__.__name__:
         processor = None
 
     return {"tokenizer": tokenizer, "processor": processor}


@@ -139,5 +139,5 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None:
                 model_args.export_hub_model_id, token=model_args.hf_hub_token
             )
-    except Exception:
-        logger.warning("Cannot save tokenizer, please copy the files manually.")
+    except Exception as e:
+        logger.warning("Cannot save tokenizer, please copy the files manually. Error: {}".format(e))