
Commit 1699d33

CatherineSue authored and Robert Shaw committed

[BugFix] Fix test_phi3v.py (vllm-project#5725)

1 parent 1996acf commit 1699d33

File tree

tests/conftest.py
tests/models/test_phi3v.py

2 files changed: +9 −5 lines changed

tests/conftest.py

Lines changed: 3 additions & 1 deletion
@@ -236,11 +236,13 @@ def generate_greedy(
         prompts: List[str],
         max_tokens: int,
         images: Optional[List[Image.Image]] = None,
+        **kwargs,
     ) -> List[Tuple[List[int], str]]:
         outputs = self.generate(prompts,
                                 do_sample=False,
                                 max_new_tokens=max_tokens,
-                                images=images)
+                                images=images,
+                                **kwargs)

         return [(output_ids[0], output_str[0])
                 for output_ids, output_str in outputs]
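
The `**kwargs` hook means `generate_greedy` can now forward arbitrary HuggingFace `generate()` keyword arguments, such as the `eos_token_id` used below in test_phi3v.py. A minimal sketch of the forwarding pattern, with a hypothetical `FakeRunner` standing in for the real HfRunner fixture:

# Hypothetical stand-in for the HfRunner test fixture; it only
# demonstrates how extra generate() kwargs flow through unchanged.
from typing import List, Tuple


class FakeRunner:

    def generate(self, prompts: List[str],
                 **kwargs) -> List[Tuple[List[int], str]]:
        # A real runner would call model.generate(**kwargs) here;
        # this sketch just echoes the kwargs to show they arrive.
        print("generate() received:", sorted(kwargs))
        return [([0], prompt) for prompt in prompts]

    def generate_greedy(self, prompts: List[str], max_tokens: int,
                        **kwargs) -> List[Tuple[List[int], str]]:
        # Extra kwargs (e.g. eos_token_id) are forwarded verbatim,
        # mirroring the conftest.py change above.
        return self.generate(prompts,
                             do_sample=False,
                             max_new_tokens=max_tokens,
                             **kwargs)


FakeRunner().generate_greedy(["hi"], max_tokens=8, eos_token_id=2)
# prints: generate() received: ['do_sample', 'eos_token_id', 'max_new_tokens']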

tests/models/test_phi3v.py

Lines changed: 6 additions & 4 deletions
@@ -77,7 +77,7 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str],
 # numeric difference for longer context and test can't pass
 @pytest.mark.parametrize("model_and_config", model_and_vl_config)
 @pytest.mark.parametrize("dtype", [target_dtype])
-@pytest.mark.parametrize("max_tokens", [8])
+@pytest.mark.parametrize("max_tokens", [128])
 def test_models(hf_runner, vllm_runner, hf_images, vllm_images,
                 model_and_config, dtype: str, max_tokens: int) -> None:
     """Inference result should be the same between hf and vllm.
@@ -95,9 +95,11 @@ def test_models(hf_runner, vllm_runner, hf_images, vllm_images,
     hf_model_kwargs = {"_attn_implementation": "eager"}
     with hf_runner(model_id, dtype=dtype,
                    model_kwargs=hf_model_kwargs) as hf_model:
-        hf_outputs = hf_model.generate_greedy(HF_IMAGE_PROMPTS,
-                                              max_tokens,
-                                              images=hf_images)
+        hf_outputs = hf_model.generate_greedy(
+            HF_IMAGE_PROMPTS,
+            max_tokens,
+            images=hf_images,
+            eos_token_id=hf_model.processor.tokenizer.eos_token_id)

     vllm_image_prompts = [
         p.replace("<|image_1|>",
