From 96636c37299bce370e705bd81c5a1f8fc295764b Mon Sep 17 00:00:00 2001
From: JieShen <49408146+JieShenAI@users.noreply.github.com>
Date: Tue, 25 Feb 2025 19:44:57 +0800
Subject: [PATCH] [script] add seed args (#7058)

* add seed args

* add seed args

* update seed

Former-commit-id: e8266fe5635470e84f9d39f43e53cc49f962c2e9
---
 scripts/vllm_infer.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/scripts/vllm_infer.py b/scripts/vllm_infer.py
index 4cdc290c..2b78fa97 100644
--- a/scripts/vllm_infer.py
+++ b/scripts/vllm_infer.py
@@ -45,6 +45,7 @@ def vllm_infer(
     top_k: int = 50,
     max_new_tokens: int = 1024,
     repetition_penalty: float = 1.0,
+    seed: int = None,
     pipeline_parallel_size: int = 1,
     image_max_pixels: int = 768 * 768,
     image_min_pixels: int = 32 * 32,
@@ -108,6 +109,7 @@ def vllm_infer(
         stop_token_ids=template_obj.get_stop_token_ids(tokenizer),
         max_tokens=generating_args.max_new_tokens,
         skip_special_tokens=False,
+        seed=seed,
     )
     if model_args.adapter_name_or_path is not None:
        lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
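
Note (not part of the patch): a minimal sketch of the effect of the new argument, showing how a fixed `seed` passed to vLLM's `SamplingParams` makes repeated sampling runs reproducible. The model name, prompt, and the remaining sampling values below are placeholders chosen for illustration; only the `seed=` keyword mirrors what the patch forwards to `SamplingParams`.

    # sketch: reproducible sampling with vLLM when a seed is set
    from vllm import LLM, SamplingParams

    llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct")  # placeholder; any vLLM-supported model works

    sampling_params = SamplingParams(
        temperature=0.95,
        top_p=0.7,
        top_k=50,
        max_tokens=1024,
        repetition_penalty=1.0,
        seed=42,  # same seed -> identical samples across runs; None keeps sampling nondeterministic
    )

    outputs = llm.generate(["Give me a short introduction to large language models."], sampling_params)
    print(outputs[0].outputs[0].text)

Leaving `seed` at its default of `None` preserves the previous behavior of the script, so the change is backward compatible.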