Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-22 22:02:51 +08:00)
Update vllm_engine.py

commit df66b288a2
parent 4c91104471
Former-commit-id: 17d0005b8cb9cf75b8247bcdf4ce022e1a5afd0b
@@ -106,7 +106,6 @@ class VllmEngine(BaseEngine):
                top_k=top_k or generating_args["top_k"],
                num_return_sequences=num_return_sequences or 1,
                repetition_penalty=repetition_penalty or generating_args["repetition_penalty"],
                stop=stop or generating_args["stop"]
            )
        )
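For context, the keyword arguments in this first hunk follow the usual override pattern in this file: a per-request value, when supplied, takes precedence over the engine-level default stored in generating_args. Below is a minimal standalone sketch of that pattern; the helper name merge_generating_args and its signature are illustrative assumptions, not part of the commit.

# Illustrative sketch of the "request value or engine default" pattern above.
# merge_generating_args is a hypothetical helper, not part of vllm_engine.py.
from typing import Any, Dict, List, Optional

def merge_generating_args(
    generating_args: Dict[str, Any],
    top_k: Optional[int] = None,
    num_return_sequences: Optional[int] = None,
    repetition_penalty: Optional[float] = None,
    stop: Optional[List[str]] = None,
) -> Dict[str, Any]:
    merged = dict(generating_args)  # keep the engine-level defaults untouched
    merged.update(
        dict(
            top_k=top_k or generating_args["top_k"],
            num_return_sequences=num_return_sequences or 1,
            repetition_penalty=repetition_penalty or generating_args["repetition_penalty"],
            stop=stop or generating_args["stop"],
        )
    )
    return merged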
@@ -124,10 +123,10 @@ class VllmEngine(BaseEngine):
            top_k=generating_args["top_k"],
            use_beam_search=generating_args["num_beams"] > 1,
            length_penalty=generating_args["length_penalty"],
            stop=stop,
            stop_token_ids=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
            max_tokens=generating_args["max_new_tokens"],
            skip_special_tokens=True,
            stop=generating_args["stop"],
        )

        if self.processor is not None and image is not None:
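The second hunk changes the source of the stop argument in the SamplingParams call: both stop=stop and stop=generating_args["stop"] appear because one line is removed and the other added, though the rendered diff no longer preserves the +/- markers. As a hedged sketch (not the exact method body), the surrounding call assembles roughly as follows; build_sampling_params is a hypothetical helper, and the fields shown, notably use_beam_search, match older vLLM releases and may differ in newer ones.

# Hedged sketch of building vLLM SamplingParams from the merged arguments.
# Assumes an older vLLM release in which use_beam_search is still a field.
from typing import Any, Dict, List, Optional
from vllm import SamplingParams

def build_sampling_params(
    generating_args: Dict[str, Any],
    tokenizer,
    stop: Optional[List[str]] = None,
) -> SamplingParams:
    return SamplingParams(
        top_k=generating_args["top_k"],
        use_beam_search=generating_args["num_beams"] > 1,
        length_penalty=generating_args["length_penalty"],
        stop=stop,
        stop_token_ids=[tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids,
        max_tokens=generating_args["max_new_tokens"],
        skip_special_tokens=True,
    )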