update readme

commit 5581cb2e4e
parent 026e88ab74
Author: hiyouga
Date: 2024-05-27 18:14:02 +08:00
10 changed files with 71 additions and 62 deletions


@@ -8,7 +8,6 @@ import torch
 from transformers import GenerationConfig, TextIteratorStreamer

 from ..data import get_template_and_fix_tokenizer
-from ..extras.constants import IMAGE_TOKEN
 from ..extras.misc import get_logits_processor
 from ..model import load_model, load_tokenizer
 from .base_engine import BaseEngine, Response
@@ -60,9 +59,9 @@ class HuggingfaceEngine(BaseEngine):
             processor is not None
             and image is not None
             and not hasattr(processor, "image_seq_length")
-            and IMAGE_TOKEN not in messages[0]["content"]
+            and template.image_token not in messages[0]["content"]
         ):  # llava-like models
-            messages[0]["content"] = IMAGE_TOKEN + messages[0]["content"]
+            messages[0]["content"] = template.image_token + messages[0]["content"]

         paired_messages = messages + [{"role": "assistant", "content": ""}]
         system = system or generating_args["default_system"]
@@ -75,7 +74,7 @@ class HuggingfaceEngine(BaseEngine):
             batch_feature = image_processor(image, return_tensors="pt")
             pixel_values = batch_feature.to(model.device)["pixel_values"]  # shape (B, C, H, W)
             if hasattr(processor, "image_seq_length"):  # paligemma models
-                image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
+                image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
                 prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids

         prompt_length = len(prompt_ids)
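
The diff replaces the module-level IMAGE_TOKEN constant with the per-template image_token attribute, so each chat template can define its own image placeholder. A minimal, hypothetical sketch of that pattern follows; the Template dataclass, the prepend_image_token helper, and the dummy processor/image objects are stand-ins and not part of the repository, only template.image_token mirrors the diff:

# Hypothetical sketch of the "image token comes from the template" pattern.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class Template:
    image_token: str = "<image>"  # per-template placeholder, replaces the global IMAGE_TOKEN


def prepend_image_token(
    messages: List[Dict[str, str]],
    template: Template,
    processor: Optional[Any],
    image: Optional[Any],
) -> None:
    """Prepend the template's image token for llava-like models (no image_seq_length)."""
    if (
        processor is not None
        and image is not None
        and not hasattr(processor, "image_seq_length")
        and template.image_token not in messages[0]["content"]
    ):
        messages[0]["content"] = template.image_token + messages[0]["content"]


messages = [{"role": "user", "content": "Describe this picture."}]
prepend_image_token(messages, Template(), processor=object(), image=object())
print(messages[0]["content"])  # -> "<image>Describe this picture."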