
Commit 725b1b4

Isotr0py authored and lulmer committed
[Bugfix] Fix broken vision language example (vllm-project#14292)
Signed-off-by: Isotr0py <[email protected]>
Signed-off-by: Louis Ulmer <[email protected]>
1 parent aa5463d · commit 725b1b4

File tree

1 file changed: +18 -22 lines changed

examples/offline_inference/vision_language.py

Lines changed: 18 additions & 22 deletions
@@ -152,15 +152,13 @@ def run_h2ovl(questions: list[str], modality: str):
 
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    prompts = [
-        tokenizer.apply_chat_template([{
-            'role': 'user',
-            'content': f"<image>\n{question}"
-        }],
-                                      tokenize=False,
-                                      add_generation_prompt=True)
-        for question in questions
-    ]
+    messages = [[{
+        'role': 'user',
+        'content': f"<image>\n{question}"
+    }] for question in questions]
+    prompts = tokenizer.apply_chat_template(messages,
+                                            tokenize=False,
+                                            add_generation_prompt=True)
 
     # Stop tokens for H2OVL-Mississippi
     # https://huggingface.co/h2oai/h2ovl-mississippi-800m
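
For context: the h2ovl and internvl hunks replace a per-question loop over apply_chat_template with one batched call. Recent transformers releases accept a list of conversations and, with tokenize=False, return one prompt string per conversation. A minimal sketch of the fixed pattern, with invented questions (the checkpoint name comes from the comment above):

from transformers import AutoTokenizer

# Hedged sketch of the batched pattern this commit introduces; the
# questions are invented for illustration.
tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ovl-mississippi-800m",
                                          trust_remote_code=True)

questions = ["What is shown here?", "Describe the colors."]

# One single-turn conversation per question.
messages = [[{
    'role': 'user',
    'content': f"<image>\n{question}"
}] for question in questions]

# A batch of conversations with tokenize=False yields one prompt string
# per conversation.
prompts = tokenizer.apply_chat_template(messages,
                                        tokenize=False,
                                        add_generation_prompt=True)
assert len(prompts) == len(questions)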
@@ -209,15 +207,13 @@ def run_internvl(questions: list[str], modality: str):
 
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    prompts = [
-        tokenizer.apply_chat_template([{
-            'role': 'user',
-            'content': f"<image>\n{question}"
-        }],
-                                      tokenize=False,
-                                      add_generation_prompt=True)
-        for question in questions
-    ]
+    messages = [[{
+        'role': 'user',
+        'content': f"<image>\n{question}"
+    }] for question in questions]
+    prompts = tokenizer.apply_chat_template(messages,
+                                            tokenize=False,
+                                            add_generation_prompt=True)
 
     # Stop tokens for InternVL
     # models variants may have different stop tokens
@@ -399,7 +395,7 @@ def run_mllama(questions: list[str], modality: str):
     )
 
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    messages = [{
+    messages = [[{
         "role":
         "user",
         "content": [{
@@ -408,7 +404,7 @@ def run_mllama(questions: list[str], modality: str):
             "type": "text",
             "text": f"{question}"
         }]
-    } for question in questions]
+    }] for question in questions]
     prompts = tokenizer.apply_chat_template(messages,
                                             add_generation_prompt=True,
                                             tokenize=False)
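
The mllama variant uses the structured content format, where user content is an inner list of parts rather than a plain string. A sketch, with a hypothetical question, of the shape one batch entry takes after the fix (assuming the elided context between the two hunks above carries the usual image placeholder part):

# Hypothetical single batch entry for mllama after the fix.
question = "What is in this image?"  # invented for illustration
conversation = [{
    "role": "user",
    "content": [{
        "type": "image"
    }, {
        "type": "text",
        "text": f"{question}"
    }],
}]
# The fixed code builds one such single-turn conversation per question,
# giving apply_chat_template a list of lists rather than a flat list.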
@@ -454,10 +450,10 @@ def run_nvlm_d(questions: list[str], modality: str):
 
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    messages = [{
+    messages = [[{
         'role': 'user',
         'content': f"<image>\n{question}"
-    } for question in questions]
+    }] for question in questions]
     prompts = tokenizer.apply_chat_template(messages,
                                             tokenize=False,
                                             add_generation_prompt=True)
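
Why the one-bracket change in the mllama and nvlm_d hunks matters: apply_chat_template treats a flat list of dicts as a single multi-turn conversation, so the old code yielded one prompt string covering all questions instead of one prompt per question. A toy illustration with invented questions:

questions = ["Q1?", "Q2?"]  # invented for illustration

# Old (broken here): read as ONE conversation with two user turns, so
# apply_chat_template(..., tokenize=False) returns a single string.
flat = [{'role': 'user', 'content': q} for q in questions]

# New (fixed): a batch of single-turn conversations, so the same call
# returns one prompt string per question, matching the per-question images.
batched = [[{'role': 'user', 'content': q}] for q in questions]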
