@@ -152,15 +152,13 @@ def run_h2ovl(questions: list[str], modality: str):
 
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    prompts = [
-        tokenizer.apply_chat_template([{
-            'role': 'user',
-            'content': f"<image>\n{question}"
-        }],
-                                      tokenize=False,
-                                      add_generation_prompt=True)
-        for question in questions
-    ]
+    messages = [[{
+        'role': 'user',
+        'content': f"<image>\n{question}"
+    }] for question in questions]
+    prompts = tokenizer.apply_chat_template(messages,
+                                            tokenize=False,
+                                            add_generation_prompt=True)
 
     # Stop tokens for H2OVL-Mississippi
     # https://huggingface.co/h2oai/h2ovl-mississippi-800m
@@ -209,15 +207,13 @@ def run_internvl(questions: list[str], modality: str):
 
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    prompts = [
-        tokenizer.apply_chat_template([{
-            'role': 'user',
-            'content': f"<image>\n{question}"
-        }],
-                                      tokenize=False,
-                                      add_generation_prompt=True)
-        for question in questions
-    ]
+    messages = [[{
+        'role': 'user',
+        'content': f"<image>\n{question}"
+    }] for question in questions]
+    prompts = tokenizer.apply_chat_template(messages,
+                                            tokenize=False,
+                                            add_generation_prompt=True)
 
     # Stop tokens for InternVL
     # models variants may have different stop tokens
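
Both hunks above make the same refactor: instead of rendering each question's conversation with a separate apply_chat_template call, they collect one conversation per question into messages and render them all in one batched call, which returns one prompt string per conversation. A minimal sketch of the resulting pattern, assuming the H2OVL checkpoint named in the comment above and placeholder questions:

    from transformers import AutoTokenizer

    # Checkpoint taken from the comment in the first hunk; any tokenizer
    # with a chat template behaves the same way here.
    tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ovl-mississippi-800m",
                                              trust_remote_code=True)

    questions = ["What is in this image?", "Where was it taken?"]  # placeholders

    # One conversation (a list of message dicts) per question.
    messages = [[{
        'role': 'user',
        'content': f"<image>\n{question}"
    }] for question in questions]

    # Passing a list of conversations batches the call: the return value is
    # a list with one rendered prompt string per conversation.
    prompts = tokenizer.apply_chat_template(messages,
                                            tokenize=False,
                                            add_generation_prompt=True)
    assert len(prompts) == len(questions)
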
@@ -399,7 +395,7 @@ def run_mllama(questions: list[str], modality: str):
     )
 
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    messages = [{
+    messages = [[{
         "role":
         "user",
         "content": [{
@@ -408,7 +404,7 @@ def run_mllama(questions: list[str], modality: str):
             "type": "text",
             "text": f"{question}"
         }]
-    } for question in questions]
+    }] for question in questions]
     prompts = tokenizer.apply_chat_template(messages,
                                             add_generation_prompt=True,
                                             tokenize=False)
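
The two mllama hunks apply the same change to structured content, where each message's content is a list of typed items (an image slot plus the question text) rather than a plain string. A sketch of the batched form; the checkpoint name is an assumption, since the diff does not show it:

    from transformers import AutoTokenizer

    # Assumed checkpoint for illustration; the hunk itself only shows
    # AutoTokenizer.from_pretrained(model_name).
    tokenizer = AutoTokenizer.from_pretrained(
        "meta-llama/Llama-3.2-11B-Vision-Instruct")

    questions = ["What is shown here?"]  # placeholder

    # Content is a list of typed items: an image placeholder, then the text.
    messages = [[{
        "role": "user",
        "content": [{
            "type": "image"
        }, {
            "type": "text",
            "text": f"{question}"
        }]
    }] for question in questions]

    prompts = tokenizer.apply_chat_template(messages,
                                            add_generation_prompt=True,
                                            tokenize=False)
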
@@ -454,10 +450,10 @@ def run_nvlm_d(questions: list[str], modality: str):
 
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    messages = [{
+    messages = [[{
         'role': 'user',
         'content': f"<image>\n{question}"
-    } for question in questions]
+    }] for question in questions]
     prompts = tokenizer.apply_chat_template(messages,
                                             tokenize=False,
                                             add_generation_prompt=True)